2024-11-23 13:20:56,538 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9
2024-11-23 13:20:56,551 main DEBUG Took 0.010523 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-11-23 13:20:56,551 main DEBUG PluginManager 'Core' found 129 plugins
2024-11-23 13:20:56,551 main DEBUG PluginManager 'Level' found 0 plugins
2024-11-23 13:20:56,552 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-11-23 13:20:56,553 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-23 13:20:56,560 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-11-23 13:20:56,571 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-23 13:20:56,572 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-23 13:20:56,573 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-23 13:20:56,573 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-23 13:20:56,574 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-23 13:20:56,574 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-23 13:20:56,575 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-23 13:20:56,575 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-23 13:20:56,575 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-23 13:20:56,575 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-23 13:20:56,576 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-23 13:20:56,577 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-23 13:20:56,577 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-23 13:20:56,577 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-23 13:20:56,578 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-23 13:20:56,578 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-23 13:20:56,579 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-23 13:20:56,579 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-23 13:20:56,579 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-23 13:20:56,579 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-23 13:20:56,580 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-23 13:20:56,580 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-23 13:20:56,581 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-23 13:20:56,581 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-23 13:20:56,581 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-23 13:20:56,582 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-11-23 13:20:56,583 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-23 13:20:56,584 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-11-23 13:20:56,586 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-11-23 13:20:56,586 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
2024-11-23 13:20:56,587 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-11-23 13:20:56,587 main DEBUG PluginManager 'Converter' found 47 plugins
2024-11-23 13:20:56,595 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-11-23 13:20:56,597 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-11-23 13:20:56,599 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-11-23 13:20:56,599 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-11-23 13:20:56,600 main DEBUG createAppenders(={Console})
2024-11-23 13:20:56,600 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 initialized
2024-11-23 13:20:56,601 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9
2024-11-23 13:20:56,601 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 OK.
2024-11-23 13:20:56,601 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-11-23 13:20:56,602 main DEBUG OutputStream closed
2024-11-23 13:20:56,602 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-11-23 13:20:56,602 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-11-23 13:20:56,603 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@54e1c68b OK
2024-11-23 13:20:56,682 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-11-23 13:20:56,685 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-11-23 13:20:56,686 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-11-23 13:20:56,687 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-11-23 13:20:56,688 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-11-23 13:20:56,689 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-11-23 13:20:56,689 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-11-23 13:20:56,690 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-11-23 13:20:56,690 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-11-23 13:20:56,691 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-11-23 13:20:56,691 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-11-23 13:20:56,691 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-11-23 13:20:56,692 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-11-23 13:20:56,692 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-11-23 13:20:56,693 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-11-23 13:20:56,693 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-11-23 13:20:56,694 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-11-23 13:20:56,695 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-11-23 13:20:56,698 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-11-23 13:20:56,698 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@7dda48d9) with optional ClassLoader: null
2024-11-23 13:20:56,698 main DEBUG Shutdown hook enabled. Registering a new one.
2024-11-23 13:20:56,699 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@7dda48d9] started OK.
2024-11-23T13:20:56,935 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/342937a9-dda7-d088-0c8f-015e3d805ddd
2024-11-23 13:20:56,938 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-11-23 13:20:56,938 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-11-23T13:20:56,947 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.TestAcidGuaranteesWithBasicPolicy timeout: 13 mins
2024-11-23T13:20:56,966 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-11-23T13:20:56,969 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/342937a9-dda7-d088-0c8f-015e3d805ddd/cluster_c9da1ea8-f465-6e7b-696b-5e558e5f7ca2, deleteOnExit=true
2024-11-23T13:20:56,970 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS
2024-11-23T13:20:56,970 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/342937a9-dda7-d088-0c8f-015e3d805ddd/test.cache.data in system properties and HBase conf
2024-11-23T13:20:56,971 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/342937a9-dda7-d088-0c8f-015e3d805ddd/hadoop.tmp.dir in system properties and HBase conf
2024-11-23T13:20:56,971 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/342937a9-dda7-d088-0c8f-015e3d805ddd/hadoop.log.dir in system properties and HBase conf
2024-11-23T13:20:56,972 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/342937a9-dda7-d088-0c8f-015e3d805ddd/mapreduce.cluster.local.dir in system properties and HBase conf
2024-11-23T13:20:56,972 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/342937a9-dda7-d088-0c8f-015e3d805ddd/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-11-23T13:20:56,973 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF
2024-11-23T13:20:57,081 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2024-11-23T13:20:57,185 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-11-23T13:20:57,189 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/342937a9-dda7-d088-0c8f-015e3d805ddd/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-11-23T13:20:57,190 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/342937a9-dda7-d088-0c8f-015e3d805ddd/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-11-23T13:20:57,191 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/342937a9-dda7-d088-0c8f-015e3d805ddd/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-11-23T13:20:57,191 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/342937a9-dda7-d088-0c8f-015e3d805ddd/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-23T13:20:57,192 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/342937a9-dda7-d088-0c8f-015e3d805ddd/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-11-23T13:20:57,193 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/342937a9-dda7-d088-0c8f-015e3d805ddd/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-11-23T13:20:57,193 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/342937a9-dda7-d088-0c8f-015e3d805ddd/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-23T13:20:57,194 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/342937a9-dda7-d088-0c8f-015e3d805ddd/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-23T13:20:57,194 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/342937a9-dda7-d088-0c8f-015e3d805ddd/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-11-23T13:20:57,195 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/342937a9-dda7-d088-0c8f-015e3d805ddd/nfs.dump.dir in system properties and HBase conf
2024-11-23T13:20:57,196 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/342937a9-dda7-d088-0c8f-015e3d805ddd/java.io.tmpdir in system properties and HBase conf
2024-11-23T13:20:57,196 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/342937a9-dda7-d088-0c8f-015e3d805ddd/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-23T13:20:57,197 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/342937a9-dda7-d088-0c8f-015e3d805ddd/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-11-23T13:20:57,197 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/342937a9-dda7-d088-0c8f-015e3d805ddd/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-11-23T13:20:58,016 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
2024-11-23T13:20:58,102 INFO [Time-limited test {}] log.Log(170): Logging initialized @2422ms to org.eclipse.jetty.util.log.Slf4jLog
2024-11-23T13:20:58,178 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-23T13:20:58,242 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-23T13:20:58,263 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-23T13:20:58,263 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-23T13:20:58,265 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-23T13:20:58,277 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-23T13:20:58,280 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/342937a9-dda7-d088-0c8f-015e3d805ddd/hadoop.log.dir/,AVAILABLE}
2024-11-23T13:20:58,282 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-23T13:20:58,483 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@b03fcff{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/342937a9-dda7-d088-0c8f-015e3d805ddd/java.io.tmpdir/jetty-localhost-46811-hadoop-hdfs-3_4_1-tests_jar-_-any-5652372302518130489/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-23T13:20:58,493 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:46811}
2024-11-23T13:20:58,494 INFO [Time-limited test {}] server.Server(415): Started @2814ms
2024-11-23T13:20:58,878 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-23T13:20:58,885 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-23T13:20:58,886 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-23T13:20:58,886 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-23T13:20:58,886 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-23T13:20:58,887 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47db50b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/342937a9-dda7-d088-0c8f-015e3d805ddd/hadoop.log.dir/,AVAILABLE}
2024-11-23T13:20:58,888 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4727fac8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-23T13:20:59,009 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1f79ec76{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/342937a9-dda7-d088-0c8f-015e3d805ddd/java.io.tmpdir/jetty-localhost-40631-hadoop-hdfs-3_4_1-tests_jar-_-any-14193486405966540864/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-23T13:20:59,010 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@576ebda6{HTTP/1.1, (http/1.1)}{localhost:40631}
2024-11-23T13:20:59,010 INFO [Time-limited test {}] server.Server(415): Started @3331ms
2024-11-23T13:20:59,069 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-23T13:20:59,571 WARN [Thread-73 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/342937a9-dda7-d088-0c8f-015e3d805ddd/cluster_c9da1ea8-f465-6e7b-696b-5e558e5f7ca2/dfs/data/data2/current/BP-413237464-172.17.0.2-1732368057776/current, will proceed with Du for space computation calculation,
2024-11-23T13:20:59,571 WARN [Thread-72 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/342937a9-dda7-d088-0c8f-015e3d805ddd/cluster_c9da1ea8-f465-6e7b-696b-5e558e5f7ca2/dfs/data/data1/current/BP-413237464-172.17.0.2-1732368057776/current, will proceed with Du for space computation calculation,
2024-11-23T13:20:59,604 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-23T13:20:59,655 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1bb6f79d433bc6fc with lease ID 0x6cab7f1c523a2951: Processing first storage report for DS-17cd8565-3e6e-46e2-b556-7f64d721bab4 from datanode DatanodeRegistration(127.0.0.1:44873, datanodeUuid=678b9c6e-6dcc-4ccd-bcce-5c58caaec29d, infoPort=38569, infoSecurePort=0, ipcPort=37775, storageInfo=lv=-57;cid=testClusterID;nsid=1433346832;c=1732368057776)
2024-11-23T13:20:59,656 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1bb6f79d433bc6fc with lease ID 0x6cab7f1c523a2951: from storage DS-17cd8565-3e6e-46e2-b556-7f64d721bab4 node DatanodeRegistration(127.0.0.1:44873, datanodeUuid=678b9c6e-6dcc-4ccd-bcce-5c58caaec29d, infoPort=38569, infoSecurePort=0, ipcPort=37775, storageInfo=lv=-57;cid=testClusterID;nsid=1433346832;c=1732368057776), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0
2024-11-23T13:20:59,656 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1bb6f79d433bc6fc with lease ID 0x6cab7f1c523a2951: Processing first storage report for DS-57163731-50d5-412a-b8d1-3cbf2db3016b from datanode DatanodeRegistration(127.0.0.1:44873, datanodeUuid=678b9c6e-6dcc-4ccd-bcce-5c58caaec29d, infoPort=38569, infoSecurePort=0, ipcPort=37775, storageInfo=lv=-57;cid=testClusterID;nsid=1433346832;c=1732368057776)
2024-11-23T13:20:59,656 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1bb6f79d433bc6fc with lease ID 0x6cab7f1c523a2951: from storage DS-57163731-50d5-412a-b8d1-3cbf2db3016b node DatanodeRegistration(127.0.0.1:44873, datanodeUuid=678b9c6e-6dcc-4ccd-bcce-5c58caaec29d, infoPort=38569, infoSecurePort=0, ipcPort=37775, storageInfo=lv=-57;cid=testClusterID;nsid=1433346832;c=1732368057776), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-23T13:20:59,723 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/342937a9-dda7-d088-0c8f-015e3d805ddd
2024-11-23T13:20:59,805 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/342937a9-dda7-d088-0c8f-015e3d805ddd/cluster_c9da1ea8-f465-6e7b-696b-5e558e5f7ca2/zookeeper_0, clientPort=51875, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/342937a9-dda7-d088-0c8f-015e3d805ddd/cluster_c9da1ea8-f465-6e7b-696b-5e558e5f7ca2/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/342937a9-dda7-d088-0c8f-015e3d805ddd/cluster_c9da1ea8-f465-6e7b-696b-5e558e5f7ca2/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-11-23T13:20:59,815 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=51875
2024-11-23T13:20:59,828 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-23T13:20:59,831 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-23T13:21:00,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741825_1001 (size=7)
2024-11-23T13:21:00,469 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7 with version=8
2024-11-23T13:21:00,469 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/hbase-staging
2024-11-23T13:21:00,597 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-11-23T13:21:00,864 INFO [Time-limited test {}] client.ConnectionUtils(129): master/ba2e440802a7:0 server-side Connection retries=45
2024-11-23T13:21:00,884 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-23T13:21:00,885 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-23T13:21:00,885 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-23T13:21:00,885 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-23T13:21:00,885 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-23T13:21:01,018 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-23T13:21:01,078 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl
2024-11-23T13:21:01,086 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout
2024-11-23T13:21:01,090 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-23T13:21:01,117 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 22838 (auto-detected)
2024-11-23T13:21:01,118 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected)
2024-11-23T13:21:01,137 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:46617
2024-11-23T13:21:01,145 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-23T13:21:01,147 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-23T13:21:01,160 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:46617 connecting to ZooKeeper ensemble=127.0.0.1:51875
2024-11-23T13:21:01,195 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:466170x0, quorum=127.0.0.1:51875, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-23T13:21:01,197 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46617-0x100248a866f0000 connected
2024-11-23T13:21:01,227 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46617-0x100248a866f0000, quorum=127.0.0.1:51875, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-23T13:21:01,230 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46617-0x100248a866f0000, quorum=127.0.0.1:51875, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-23T13:21:01,234 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46617-0x100248a866f0000, quorum=127.0.0.1:51875, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-23T13:21:01,240 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46617
2024-11-23T13:21:01,240 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46617
2024-11-23T13:21:01,241 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46617
2024-11-23T13:21:01,241 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46617
2024-11-23T13:21:01,242 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46617
2024-11-23T13:21:01,251 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7, hbase.cluster.distributed=false
2024-11-23T13:21:01,318 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/ba2e440802a7:0 server-side Connection retries=45
2024-11-23T13:21:01,319 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-23T13:21:01,319 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-23T13:21:01,319 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-23T13:21:01,319 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-23T13:21:01,319 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-23T13:21:01,322 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-23T13:21:01,324 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-23T13:21:01,325 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:33173
2024-11-23T13:21:01,326 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-11-23T13:21:01,332 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-11-23T13:21:01,333 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-23T13:21:01,336 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-23T13:21:01,339 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:33173 connecting to ZooKeeper ensemble=127.0.0.1:51875
2024-11-23T13:21:01,343 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:331730x0, quorum=127.0.0.1:51875, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-23T13:21:01,343 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:331730x0, quorum=127.0.0.1:51875, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-23T13:21:01,343 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33173-0x100248a866f0001 connected
2024-11-23T13:21:01,345 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33173-0x100248a866f0001, quorum=127.0.0.1:51875, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-23T13:21:01,346 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33173-0x100248a866f0001, quorum=127.0.0.1:51875, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-23T13:21:01,348 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33173
2024-11-23T13:21:01,349 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33173
2024-11-23T13:21:01,349 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33173
2024-11-23T13:21:01,350 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33173
2024-11-23T13:21:01,350 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33173
2024-11-23T13:21:01,352 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/ba2e440802a7,46617,1732368060590
2024-11-23T13:21:01,358 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33173-0x100248a866f0001, quorum=127.0.0.1:51875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-23T13:21:01,358 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46617-0x100248a866f0000, quorum=127.0.0.1:51875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-23T13:21:01,360 DEBUG [master/ba2e440802a7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46617-0x100248a866f0000, quorum=127.0.0.1:51875, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/ba2e440802a7,46617,1732368060590
2024-11-23T13:21:01,370 DEBUG [M:0;ba2e440802a7:46617 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;ba2e440802a7:46617
2024-11-23T13:21:01,383 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46617-0x100248a866f0000, quorum=127.0.0.1:51875, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-11-23T13:21:01,383 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33173-0x100248a866f0001, quorum=127.0.0.1:51875, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-11-23T13:21:01,383 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46617-0x100248a866f0000, quorum=127.0.0.1:51875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-23T13:21:01,383 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33173-0x100248a866f0001, quorum=127.0.0.1:51875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-23T13:21:01,385 DEBUG [master/ba2e440802a7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46617-0x100248a866f0000, quorum=127.0.0.1:51875, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-11-23T13:21:01,386 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/ba2e440802a7,46617,1732368060590 from backup master directory
2024-11-23T13:21:01,386 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:46617-0x100248a866f0000, quorum=127.0.0.1:51875, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-11-23T13:21:01,389 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46617-0x100248a866f0000, quorum=127.0.0.1:51875, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/ba2e440802a7,46617,1732368060590
2024-11-23T13:21:01,390 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33173-0x100248a866f0001, quorum=127.0.0.1:51875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-23T13:21:01,390 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46617-0x100248a866f0000, quorum=127.0.0.1:51875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-11-23T13:21:01,390 WARN [master/ba2e440802a7:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-11-23T13:21:01,390 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=ba2e440802a7,46617,1732368060590
2024-11-23T13:21:01,393 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0
2024-11-23T13:21:01,394 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0
2024-11-23T13:21:01,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741826_1002 (size=42)
2024-11-23T13:21:01,864 DEBUG [master/ba2e440802a7:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/hbase.id with ID: adddb2c9-dae3-4509-be13-e2a4ac11237d
2024-11-23T13:21:01,910 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-23T13:21:01,936 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33173-0x100248a866f0001, quorum=127.0.0.1:51875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-23T13:21:01,936 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46617-0x100248a866f0000, quorum=127.0.0.1:51875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-23T13:21:01,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741827_1003 (size=196)
2024-11-23T13:21:01,968 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-11-23T13:21:01,970 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-11-23T13:21:01,988 DEBUG [master/ba2e440802a7:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396
java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo)
	at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at java.lang.Class.forName0(Native Method) ~[?:?]
	at java.lang.Class.forName(Class.java:375) ~[?:?]
	at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?]
	at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?]
	at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?]
	at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:232) ~[classes/:?]
	at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:207) ~[classes/:?]
	at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?]
	at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?]
	at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?]
	at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?]
	at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-23T13:21:01,992 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider
2024-11-23T13:21:02,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741828_1004 (size=1189)
2024-11-23T13:21:02,442 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/MasterData/data/master/store
2024-11-23T13:21:02,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741829_1005 (size=34)
2024-11-23T13:21:02,862 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure.
2024-11-23T13:21:02,862 DEBUG [master/ba2e440802a7:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-23T13:21:02,863 DEBUG [master/ba2e440802a7:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-11-23T13:21:02,864 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-23T13:21:02,864 DEBUG [master/ba2e440802a7:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-23T13:21:02,864 DEBUG [master/ba2e440802a7:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-11-23T13:21:02,864 DEBUG [master/ba2e440802a7:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-23T13:21:02,864 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-23T13:21:02,864 DEBUG [master/ba2e440802a7:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-11-23T13:21:02,866 WARN [master/ba2e440802a7:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/MasterData/data/master/store/.initializing
2024-11-23T13:21:02,867 DEBUG [master/ba2e440802a7:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/MasterData/WALs/ba2e440802a7,46617,1732368060590
2024-11-23T13:21:02,873 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName
2024-11-23T13:21:02,883 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ba2e440802a7%2C46617%2C1732368060590, suffix=, logDir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/MasterData/WALs/ba2e440802a7,46617,1732368060590, archiveDir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/MasterData/oldWALs, maxLogs=10
2024-11-23T13:21:02,905 DEBUG [master/ba2e440802a7:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/MasterData/WALs/ba2e440802a7,46617,1732368060590/ba2e440802a7%2C46617%2C1732368060590.1732368062888, exclude list is [], retry=0
2024-11-23T13:21:02,922 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44873,DS-17cd8565-3e6e-46e2-b556-7f64d721bab4,DISK]
2024-11-23T13:21:02,925 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf.
2024-11-23T13:21:02,961 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/MasterData/WALs/ba2e440802a7,46617,1732368060590/ba2e440802a7%2C46617%2C1732368060590.1732368062888 2024-11-23T13:21:02,961 DEBUG [master/ba2e440802a7:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:38569:38569)] 2024-11-23T13:21:02,962 DEBUG [master/ba2e440802a7:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-23T13:21:02,962 DEBUG [master/ba2e440802a7:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T13:21:02,965 DEBUG [master/ba2e440802a7:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T13:21:02,966 DEBUG [master/ba2e440802a7:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T13:21:03,004 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T13:21:03,028 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-23T13:21:03,032 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:03,034 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T13:21:03,034 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T13:21:03,038 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-23T13:21:03,038 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:03,039 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T13:21:03,039 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T13:21:03,042 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-23T13:21:03,042 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:03,043 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T13:21:03,043 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T13:21:03,046 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-23T13:21:03,046 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:03,047 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T13:21:03,051 DEBUG [master/ba2e440802a7:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-23T13:21:03,052 DEBUG [master/ba2e440802a7:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-23T13:21:03,060 DEBUG [master/ba2e440802a7:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-23T13:21:03,065 DEBUG [master/ba2e440802a7:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T13:21:03,069 DEBUG [master/ba2e440802a7:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T13:21:03,070 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67585811, jitterRate=0.007107064127922058}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-23T13:21:03,075 DEBUG [master/ba2e440802a7:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-23T13:21:03,076 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-23T13:21:03,102 DEBUG [master/ba2e440802a7:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9bd971f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:21:03,137 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 
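[annotation] Two of the numbers above can be reconstructed directly from the log. The FlushLargeStoresPolicy lower bound of 32.0 M is the flush size reported by MasterRegionFlusherAndCompactor (134217728 bytes) divided by the master store's four column families (info, proc, rs, state), and ConstantSizeRegionSplitPolicy's desiredMaxFileSize=67585811 is a 64 MB base with the reported jitterRate applied. A small, purely illustrative check (the 64 MB base is an assumption inferred from the arithmetic):

    public class MasterStoreSizing {
      public static void main(String[] args) {
        long flushSize = 134_217_728L;            // flushSize logged by MasterRegionFlusherAndCompactor
        int families = 4;                         // info, proc, rs, state
        System.out.println(flushSize / families); // 33554432 = the logged flushSizeLowerBound (32.0 M)

        long base = 64L * 1024 * 1024;                            // assumed 64 MB base file size
        double jitterRate = 0.007107064127922058;                 // jitterRate copied from the log
        System.out.println(Math.round(base * (1 + jitterRate)));  // ~67585811 = logged desiredMaxFileSize
      }
    }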
2024-11-23T13:21:03,149 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-23T13:21:03,149 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-23T13:21:03,151 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-23T13:21:03,152 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 1 msec 2024-11-23T13:21:03,157 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 4 msec 2024-11-23T13:21:03,157 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-23T13:21:03,188 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-23T13:21:03,199 DEBUG [master/ba2e440802a7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46617-0x100248a866f0000, quorum=127.0.0.1:51875, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-23T13:21:03,202 DEBUG [master/ba2e440802a7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-11-23T13:21:03,204 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-23T13:21:03,205 DEBUG [master/ba2e440802a7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46617-0x100248a866f0000, quorum=127.0.0.1:51875, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-23T13:21:03,207 DEBUG [master/ba2e440802a7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-11-23T13:21:03,208 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-23T13:21:03,211 DEBUG [master/ba2e440802a7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46617-0x100248a866f0000, quorum=127.0.0.1:51875, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-23T13:21:03,213 DEBUG [master/ba2e440802a7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-11-23T13:21:03,214 DEBUG [master/ba2e440802a7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46617-0x100248a866f0000, quorum=127.0.0.1:51875, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-23T13:21:03,216 DEBUG [master/ba2e440802a7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-11-23T13:21:03,225 DEBUG [master/ba2e440802a7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46617-0x100248a866f0000, quorum=127.0.0.1:51875, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-23T13:21:03,227 DEBUG [master/ba2e440802a7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-23T13:21:03,231 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33173-0x100248a866f0001, quorum=127.0.0.1:51875, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-23T13:21:03,231 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46617-0x100248a866f0000, quorum=127.0.0.1:51875, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-23T13:21:03,231 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33173-0x100248a866f0001, quorum=127.0.0.1:51875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T13:21:03,231 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46617-0x100248a866f0000, quorum=127.0.0.1:51875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T13:21:03,231 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=ba2e440802a7,46617,1732368060590, sessionid=0x100248a866f0000, setting cluster-up flag (Was=false) 2024-11-23T13:21:03,244 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46617-0x100248a866f0000, quorum=127.0.0.1:51875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T13:21:03,244 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33173-0x100248a866f0001, quorum=127.0.0.1:51875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T13:21:03,251 DEBUG [master/ba2e440802a7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-23T13:21:03,253 DEBUG [master/ba2e440802a7:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=ba2e440802a7,46617,1732368060590 2024-11-23T13:21:03,258 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33173-0x100248a866f0001, quorum=127.0.0.1:51875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T13:21:03,258 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46617-0x100248a866f0000, quorum=127.0.0.1:51875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T13:21:03,264 DEBUG [master/ba2e440802a7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-23T13:21:03,265 DEBUG [master/ba2e440802a7:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=ba2e440802a7,46617,1732368060590 2024-11-23T13:21:03,354 DEBUG [master/ba2e440802a7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure 
table=hbase:meta 2024-11-23T13:21:03,360 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-11-23T13:21:03,363 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-23T13:21:03,366 DEBUG [RS:0;ba2e440802a7:33173 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;ba2e440802a7:33173 2024-11-23T13:21:03,368 INFO [RS:0;ba2e440802a7:33173 {}] regionserver.HRegionServer(1008): ClusterId : adddb2c9-dae3-4509-be13-e2a4ac11237d 2024-11-23T13:21:03,370 DEBUG [RS:0;ba2e440802a7:33173 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-23T13:21:03,369 DEBUG [master/ba2e440802a7:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: ba2e440802a7,46617,1732368060590 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-23T13:21:03,372 DEBUG [master/ba2e440802a7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/ba2e440802a7:0, corePoolSize=5, maxPoolSize=5 2024-11-23T13:21:03,372 DEBUG [master/ba2e440802a7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/ba2e440802a7:0, corePoolSize=5, maxPoolSize=5 2024-11-23T13:21:03,373 DEBUG [master/ba2e440802a7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/ba2e440802a7:0, corePoolSize=5, maxPoolSize=5 2024-11-23T13:21:03,373 DEBUG [master/ba2e440802a7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/ba2e440802a7:0, corePoolSize=5, maxPoolSize=5 2024-11-23T13:21:03,373 DEBUG [master/ba2e440802a7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/ba2e440802a7:0, corePoolSize=10, maxPoolSize=10 2024-11-23T13:21:03,373 DEBUG [master/ba2e440802a7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/ba2e440802a7:0, corePoolSize=1, maxPoolSize=1 2024-11-23T13:21:03,374 DEBUG [master/ba2e440802a7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/ba2e440802a7:0, corePoolSize=2, maxPoolSize=2 2024-11-23T13:21:03,374 DEBUG [master/ba2e440802a7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/ba2e440802a7:0, corePoolSize=1, maxPoolSize=1 2024-11-23T13:21:03,375 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; 
timeout=30000, timestamp=1732368093375 2024-11-23T13:21:03,375 DEBUG [RS:0;ba2e440802a7:33173 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-23T13:21:03,375 DEBUG [RS:0;ba2e440802a7:33173 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-23T13:21:03,376 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-23T13:21:03,378 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-23T13:21:03,378 DEBUG [RS:0;ba2e440802a7:33173 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-23T13:21:03,379 DEBUG [RS:0;ba2e440802a7:33173 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1df8c73f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:21:03,379 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-11-23T13:21:03,379 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-11-23T13:21:03,381 DEBUG [RS:0;ba2e440802a7:33173 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c38869b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ba2e440802a7/172.17.0.2:0 2024-11-23T13:21:03,381 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-23T13:21:03,382 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-23T13:21:03,382 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-23T13:21:03,382 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-23T13:21:03,383 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-23T13:21:03,384 INFO [RS:0;ba2e440802a7:33173 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-11-23T13:21:03,384 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:03,384 INFO [RS:0;ba2e440802a7:33173 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-11-23T13:21:03,384 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-23T13:21:03,384 DEBUG [RS:0;ba2e440802a7:33173 {}] regionserver.HRegionServer(1090): About to register with Master. 
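[annotation] Several entries above and below register periodic work through ChoreService ("Chore ScheduledChore name=LogsCleaner, period=600000 ... is enabled."). The sketch below shows that pattern in isolation, assuming the three-argument ScheduledChore(name, stopper, period) constructor; it is illustrative, not the cleaner's actual wiring.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) {
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        ChoreService service = new ChoreService("demo");
        // The 600000 ms period mirrors the LogsCleaner chore logged above.
        ScheduledChore chore = new ScheduledChore("LogsCleaner-demo", stopper, 600000) {
          @Override protected void chore() {
            System.out.println("periodic cleanup would run here");
          }
        };
        service.scheduleChore(chore);
        service.shutdown();
      }
    }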
2024-11-23T13:21:03,384 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-23T13:21:03,385 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-23T13:21:03,386 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-23T13:21:03,387 INFO [RS:0;ba2e440802a7:33173 {}] regionserver.HRegionServer(3073): reportForDuty to master=ba2e440802a7,46617,1732368060590 with isa=ba2e440802a7/172.17.0.2:33173, startcode=1732368061317 2024-11-23T13:21:03,388 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-23T13:21:03,388 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-23T13:21:03,390 DEBUG [master/ba2e440802a7:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/ba2e440802a7:0:becomeActiveMaster-HFileCleaner.large.0-1732368063389,5,FailOnTimeoutGroup] 2024-11-23T13:21:03,391 DEBUG [master/ba2e440802a7:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/ba2e440802a7:0:becomeActiveMaster-HFileCleaner.small.0-1732368063390,5,FailOnTimeoutGroup] 2024-11-23T13:21:03,391 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-23T13:21:03,391 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-23T13:21:03,393 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-23T13:21:03,393 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
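[annotation] The FSTableDescriptors entry above prints the full hbase:meta schema (three families; VERSIONS 3 on 'info' and 'table', ROWCOL bloom filters, ROW_INDEX_V1 encoding, in-memory block cache, 8 KB block size on 'info'). For orientation, a hedged sketch of how an equivalent family would be expressed with the public builder API; this is illustrative and uses a hypothetical table name, not how HMaster actually builds the meta descriptor.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeDescriptor {
      public static void main(String[] args) {
        // Mirrors the 'info' family attributes printed in the log (VERSIONS=3, IN_MEMORY,
        // BLOOMFILTER=ROWCOL, DATA_BLOCK_ENCODING=ROW_INDEX_V1, BLOCKSIZE=8192).
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBlocksize(8192)
            .build();
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo", "meta_like")) // hypothetical table, not hbase:meta itself
            .setColumnFamily(info)
            .build();
        System.out.println(td);
      }
    }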
2024-11-23T13:21:03,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741831_1007 (size=1039) 2024-11-23T13:21:03,400 DEBUG [RS:0;ba2e440802a7:33173 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-23T13:21:03,433 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37965, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-23T13:21:03,438 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46617 {}] master.ServerManager(332): Checking decommissioned status of RegionServer ba2e440802a7,33173,1732368061317 2024-11-23T13:21:03,441 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46617 {}] master.ServerManager(486): Registering regionserver=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:03,456 DEBUG [RS:0;ba2e440802a7:33173 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7 2024-11-23T13:21:03,456 DEBUG [RS:0;ba2e440802a7:33173 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:34115 2024-11-23T13:21:03,456 DEBUG [RS:0;ba2e440802a7:33173 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-11-23T13:21:03,461 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46617-0x100248a866f0000, quorum=127.0.0.1:51875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-23T13:21:03,461 DEBUG [RS:0;ba2e440802a7:33173 {}] zookeeper.ZKUtil(111): regionserver:33173-0x100248a866f0001, quorum=127.0.0.1:51875, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/ba2e440802a7,33173,1732368061317 2024-11-23T13:21:03,462 WARN [RS:0;ba2e440802a7:33173 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
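[annotation] The entries above show the region server creating its ephemeral znode under /hbase/rs and the master's RegionServerTracker picking it up. Purely for illustration, those znodes can be listed with the stock ZooKeeper client against the quorum logged in this run (127.0.0.1:51875); this is not an HBase internal call.

    import java.util.List;
    import org.apache.zookeeper.ZooKeeper;

    public class InspectRsZnodes {
      public static void main(String[] args) throws Exception {
        // Connect to the quorum printed in the log; a no-op watcher is enough for a one-off read.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:51875", 30_000, event -> { });
        List<String> servers = zk.getChildren("/hbase/rs", false);
        servers.forEach(System.out::println); // e.g. ba2e440802a7,33173,1732368061317
        zk.close();
      }
    }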
2024-11-23T13:21:03,462 INFO [RS:0;ba2e440802a7:33173 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-23T13:21:03,462 DEBUG [RS:0;ba2e440802a7:33173 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/WALs/ba2e440802a7,33173,1732368061317 2024-11-23T13:21:03,464 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [ba2e440802a7,33173,1732368061317] 2024-11-23T13:21:03,476 DEBUG [RS:0;ba2e440802a7:33173 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-11-23T13:21:03,487 INFO [RS:0;ba2e440802a7:33173 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-23T13:21:03,501 INFO [RS:0;ba2e440802a7:33173 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-23T13:21:03,503 INFO [RS:0;ba2e440802a7:33173 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-23T13:21:03,503 INFO [RS:0;ba2e440802a7:33173 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T13:21:03,504 INFO [RS:0;ba2e440802a7:33173 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-11-23T13:21:03,511 INFO [RS:0;ba2e440802a7:33173 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
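[annotation] MemStoreFlusher above reports globalMemStoreLimit=880 M with a low-water mark of 836 M, i.e. 95% of the limit (880 x 0.95 = 836). The key names below are the usual HBase 2.x settings and are stated as an assumption; only the 0.95 relationship is taken from the log itself.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);              // fraction of heap (assumed default)
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f); // low mark factor
        System.out.println(880 * 0.95); // 836.0, the logged globalMemStoreLimitLowMark
      }
    }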
2024-11-23T13:21:03,512 DEBUG [RS:0;ba2e440802a7:33173 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/ba2e440802a7:0, corePoolSize=1, maxPoolSize=1 2024-11-23T13:21:03,512 DEBUG [RS:0;ba2e440802a7:33173 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/ba2e440802a7:0, corePoolSize=1, maxPoolSize=1 2024-11-23T13:21:03,512 DEBUG [RS:0;ba2e440802a7:33173 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/ba2e440802a7:0, corePoolSize=1, maxPoolSize=1 2024-11-23T13:21:03,512 DEBUG [RS:0;ba2e440802a7:33173 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/ba2e440802a7:0, corePoolSize=1, maxPoolSize=1 2024-11-23T13:21:03,513 DEBUG [RS:0;ba2e440802a7:33173 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/ba2e440802a7:0, corePoolSize=1, maxPoolSize=1 2024-11-23T13:21:03,513 DEBUG [RS:0;ba2e440802a7:33173 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/ba2e440802a7:0, corePoolSize=2, maxPoolSize=2 2024-11-23T13:21:03,513 DEBUG [RS:0;ba2e440802a7:33173 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/ba2e440802a7:0, corePoolSize=1, maxPoolSize=1 2024-11-23T13:21:03,513 DEBUG [RS:0;ba2e440802a7:33173 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/ba2e440802a7:0, corePoolSize=1, maxPoolSize=1 2024-11-23T13:21:03,513 DEBUG [RS:0;ba2e440802a7:33173 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/ba2e440802a7:0, corePoolSize=1, maxPoolSize=1 2024-11-23T13:21:03,513 DEBUG [RS:0;ba2e440802a7:33173 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/ba2e440802a7:0, corePoolSize=1, maxPoolSize=1 2024-11-23T13:21:03,513 DEBUG [RS:0;ba2e440802a7:33173 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/ba2e440802a7:0, corePoolSize=1, maxPoolSize=1 2024-11-23T13:21:03,514 DEBUG [RS:0;ba2e440802a7:33173 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/ba2e440802a7:0, corePoolSize=3, maxPoolSize=3 2024-11-23T13:21:03,514 DEBUG [RS:0;ba2e440802a7:33173 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0, corePoolSize=3, maxPoolSize=3 2024-11-23T13:21:03,515 INFO [RS:0;ba2e440802a7:33173 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-23T13:21:03,515 INFO [RS:0;ba2e440802a7:33173 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-23T13:21:03,515 INFO [RS:0;ba2e440802a7:33173 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-23T13:21:03,515 INFO [RS:0;ba2e440802a7:33173 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-23T13:21:03,515 INFO [RS:0;ba2e440802a7:33173 {}] hbase.ChoreService(168): Chore ScheduledChore name=ba2e440802a7,33173,1732368061317-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
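[annotation] The RS_* executor services above are each small fixed-size pools (corePoolSize equal to maxPoolSize, one to three threads per operation type). A rough JDK-level sketch of what one such entry amounts to; HBase wraps its own ExecutorService around this idea, so this is only an approximation.

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class ExecutorSketch {
      public static void main(String[] args) {
        // Roughly what "corePoolSize=3, maxPoolSize=3" means for RS_SNAPSHOT_OPERATIONS above.
        ThreadPoolExecutor pool =
            new ThreadPoolExecutor(3, 3, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
        pool.allowCoreThreadTimeOut(true); // same flag logged for RemoteProcedureDispatcher earlier
        pool.execute(() -> System.out.println("snapshot subtask would run here"));
        pool.shutdown();
      }
    }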
2024-11-23T13:21:03,535 INFO [RS:0;ba2e440802a7:33173 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-23T13:21:03,537 INFO [RS:0;ba2e440802a7:33173 {}] hbase.ChoreService(168): Chore ScheduledChore name=ba2e440802a7,33173,1732368061317-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T13:21:03,557 INFO [RS:0;ba2e440802a7:33173 {}] regionserver.Replication(204): ba2e440802a7,33173,1732368061317 started 2024-11-23T13:21:03,557 INFO [RS:0;ba2e440802a7:33173 {}] regionserver.HRegionServer(1767): Serving as ba2e440802a7,33173,1732368061317, RpcServer on ba2e440802a7/172.17.0.2:33173, sessionid=0x100248a866f0001 2024-11-23T13:21:03,558 DEBUG [RS:0;ba2e440802a7:33173 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-23T13:21:03,558 DEBUG [RS:0;ba2e440802a7:33173 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager ba2e440802a7,33173,1732368061317 2024-11-23T13:21:03,558 DEBUG [RS:0;ba2e440802a7:33173 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ba2e440802a7,33173,1732368061317' 2024-11-23T13:21:03,558 DEBUG [RS:0;ba2e440802a7:33173 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-23T13:21:03,559 DEBUG [RS:0;ba2e440802a7:33173 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-23T13:21:03,560 DEBUG [RS:0;ba2e440802a7:33173 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-23T13:21:03,560 DEBUG [RS:0;ba2e440802a7:33173 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-23T13:21:03,560 DEBUG [RS:0;ba2e440802a7:33173 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager ba2e440802a7,33173,1732368061317 2024-11-23T13:21:03,560 DEBUG [RS:0;ba2e440802a7:33173 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ba2e440802a7,33173,1732368061317' 2024-11-23T13:21:03,560 DEBUG [RS:0;ba2e440802a7:33173 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-23T13:21:03,561 DEBUG [RS:0;ba2e440802a7:33173 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-23T13:21:03,562 DEBUG [RS:0;ba2e440802a7:33173 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-23T13:21:03,562 INFO [RS:0;ba2e440802a7:33173 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-23T13:21:03,562 INFO [RS:0;ba2e440802a7:33173 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
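[annotation] Both quota managers above report "Quota support disabled", which is the default. Quota support hangs off a single boolean switch; the key name below is believed to be the standard one and the snippet only sketches how it would be turned on in a test configuration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class EnableQuotas {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setBoolean("hbase.quota.enabled", true); // default false, hence the two "disabled" lines above
      }
    }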
2024-11-23T13:21:03,668 INFO [RS:0;ba2e440802a7:33173 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-23T13:21:03,671 INFO [RS:0;ba2e440802a7:33173 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ba2e440802a7%2C33173%2C1732368061317, suffix=, logDir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/WALs/ba2e440802a7,33173,1732368061317, archiveDir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/oldWALs, maxLogs=32 2024-11-23T13:21:03,689 DEBUG [RS:0;ba2e440802a7:33173 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/WALs/ba2e440802a7,33173,1732368061317/ba2e440802a7%2C33173%2C1732368061317.1732368063674, exclude list is [], retry=0 2024-11-23T13:21:03,694 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44873,DS-17cd8565-3e6e-46e2-b556-7f64d721bab4,DISK] 2024-11-23T13:21:03,698 INFO [RS:0;ba2e440802a7:33173 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/WALs/ba2e440802a7,33173,1732368061317/ba2e440802a7%2C33173%2C1732368061317.1732368063674 2024-11-23T13:21:03,698 DEBUG [RS:0;ba2e440802a7:33173 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:38569:38569)] 2024-11-23T13:21:03,798 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-11-23T13:21:03,798 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7 2024-11-23T13:21:03,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741833_1009 (size=32) 2024-11-23T13:21:04,211 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): 
Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T13:21:04,213 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-23T13:21:04,216 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-23T13:21:04,216 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:04,217 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T13:21:04,218 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-23T13:21:04,220 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-23T13:21:04,220 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:04,221 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T13:21:04,221 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family table of region 1588230740 2024-11-23T13:21:04,224 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-23T13:21:04,224 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:04,225 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T13:21:04,226 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/hbase/meta/1588230740 2024-11-23T13:21:04,227 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/hbase/meta/1588230740 2024-11-23T13:21:04,230 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
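[annotation] The CompactionConfiguration lines repeated for every column family all print the same policy: minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2, off-peak ratio 5.0, minCompactSize 128 MB and a 2684354560-byte (2.5 GB) throttle point. A sketch of the Configuration keys that usually correspond to those values; the key names are assumptions, the values are copied from the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                 // exploring-policy ratio
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);         // off-peak ratio
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize: 128 MB
        conf.setLong("hbase.regionserver.thread.compaction.throttle", 2_684_354_560L); // 2.5 GB throttle point
      }
    }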
2024-11-23T13:21:04,232 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-23T13:21:04,236 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T13:21:04,237 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69142374, jitterRate=0.030301660299301147}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T13:21:04,239 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-23T13:21:04,239 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-11-23T13:21:04,239 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-11-23T13:21:04,239 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-11-23T13:21:04,239 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-23T13:21:04,239 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-11-23T13:21:04,241 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-11-23T13:21:04,241 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-11-23T13:21:04,243 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-11-23T13:21:04,243 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-11-23T13:21:04,249 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-23T13:21:04,257 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-23T13:21:04,259 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-23T13:21:04,410 DEBUG [ba2e440802a7:46617 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-23T13:21:04,415 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:04,420 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as ba2e440802a7,33173,1732368061317, state=OPENING 2024-11-23T13:21:04,426 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-23T13:21:04,428 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46617-0x100248a866f0000, quorum=127.0.0.1:51875, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T13:21:04,428 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33173-0x100248a866f0001, quorum=127.0.0.1:51875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T13:21:04,429 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T13:21:04,429 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T13:21:04,430 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=ba2e440802a7,33173,1732368061317}] 2024-11-23T13:21:04,605 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:04,607 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-23T13:21:04,610 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52582, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-23T13:21:04,622 INFO [RS_OPEN_META-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-11-23T13:21:04,622 INFO [RS_OPEN_META-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-23T13:21:04,622 INFO [RS_OPEN_META-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-23T13:21:04,625 INFO [RS_OPEN_META-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ba2e440802a7%2C33173%2C1732368061317.meta, suffix=.meta, logDir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/WALs/ba2e440802a7,33173,1732368061317, archiveDir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/oldWALs, maxLogs=32 2024-11-23T13:21:04,642 DEBUG [RS_OPEN_META-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/WALs/ba2e440802a7,33173,1732368061317/ba2e440802a7%2C33173%2C1732368061317.meta.1732368064627.meta, exclude list is [], retry=0 2024-11-23T13:21:04,646 DEBUG [RS-EventLoopGroup-3-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44873,DS-17cd8565-3e6e-46e2-b556-7f64d721bab4,DISK] 2024-11-23T13:21:04,649 INFO [RS_OPEN_META-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/WALs/ba2e440802a7,33173,1732368061317/ba2e440802a7%2C33173%2C1732368061317.meta.1732368064627.meta 2024-11-23T13:21:04,650 DEBUG [RS_OPEN_META-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer 
with pipeline: [(127.0.0.1/127.0.0.1:38569:38569)] 2024-11-23T13:21:04,650 DEBUG [RS_OPEN_META-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-23T13:21:04,651 DEBUG [RS_OPEN_META-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-23T13:21:04,711 DEBUG [RS_OPEN_META-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-23T13:21:04,716 INFO [RS_OPEN_META-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-23T13:21:04,720 DEBUG [RS_OPEN_META-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-23T13:21:04,720 DEBUG [RS_OPEN_META-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T13:21:04,720 DEBUG [RS_OPEN_META-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-11-23T13:21:04,721 DEBUG [RS_OPEN_META-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-11-23T13:21:04,724 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-23T13:21:04,726 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-23T13:21:04,726 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:04,727 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T13:21:04,727 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-23T13:21:04,729 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-23T13:21:04,729 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:04,730 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T13:21:04,730 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-23T13:21:04,732 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-23T13:21:04,732 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:04,733 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T13:21:04,734 DEBUG [RS_OPEN_META-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/hbase/meta/1588230740 2024-11-23T13:21:04,737 DEBUG [RS_OPEN_META-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/hbase/meta/1588230740 2024-11-23T13:21:04,740 DEBUG [RS_OPEN_META-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-23T13:21:04,743 DEBUG [RS_OPEN_META-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-23T13:21:04,744 INFO [RS_OPEN_META-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62643082, jitterRate=-0.06654533743858337}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T13:21:04,746 DEBUG [RS_OPEN_META-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-23T13:21:04,753 INFO [RS_OPEN_META-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732368064599 2024-11-23T13:21:04,764 DEBUG [RS_OPEN_META-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-23T13:21:04,765 INFO [RS_OPEN_META-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-11-23T13:21:04,766 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:04,767 INFO [PEWorker-4 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as ba2e440802a7,33173,1732368061317, state=OPEN 2024-11-23T13:21:04,772 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33173-0x100248a866f0001, quorum=127.0.0.1:51875, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T13:21:04,772 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46617-0x100248a866f0000, quorum=127.0.0.1:51875, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T13:21:04,773 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T13:21:04,773 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T13:21:04,777 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-11-23T13:21:04,777 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=ba2e440802a7,33173,1732368061317 in 342 msec 2024-11-23T13:21:04,783 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-11-23T13:21:04,783 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; 
TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 529 msec 2024-11-23T13:21:04,788 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.4850 sec 2024-11-23T13:21:04,788 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732368064788, completionTime=-1 2024-11-23T13:21:04,788 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-23T13:21:04,788 DEBUG [master/ba2e440802a7:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-11-23T13:21:04,826 DEBUG [hconnection-0x1ab0a33d-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:21:04,828 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52590, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:21:04,838 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-11-23T13:21:04,839 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732368124839 2024-11-23T13:21:04,839 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732368184839 2024-11-23T13:21:04,839 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 50 msec 2024-11-23T13:21:04,860 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ba2e440802a7,46617,1732368060590-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T13:21:04,860 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ba2e440802a7,46617,1732368060590-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T13:21:04,861 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ba2e440802a7,46617,1732368060590-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T13:21:04,862 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-ba2e440802a7:46617, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T13:21:04,862 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-23T13:21:04,867 DEBUG [master/ba2e440802a7:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-11-23T13:21:04,870 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
2024-11-23T13:21:04,871 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-23T13:21:04,878 DEBUG [master/ba2e440802a7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-11-23T13:21:04,881 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T13:21:04,882 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:04,884 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T13:21:04,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741835_1011 (size=358) 2024-11-23T13:21:05,299 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => ee2ee0e805ec7a6fa6f5f67efb41c78f, NAME => 'hbase:namespace,,1732368064871.ee2ee0e805ec7a6fa6f5f67efb41c78f.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7 2024-11-23T13:21:05,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741836_1012 (size=42) 2024-11-23T13:21:05,309 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1732368064871.ee2ee0e805ec7a6fa6f5f67efb41c78f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T13:21:05,309 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing ee2ee0e805ec7a6fa6f5f67efb41c78f, disabling compactions & flushes 2024-11-23T13:21:05,309 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1732368064871.ee2ee0e805ec7a6fa6f5f67efb41c78f. 2024-11-23T13:21:05,309 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1732368064871.ee2ee0e805ec7a6fa6f5f67efb41c78f. 2024-11-23T13:21:05,309 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1732368064871.ee2ee0e805ec7a6fa6f5f67efb41c78f. 
after waiting 0 ms 2024-11-23T13:21:05,309 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1732368064871.ee2ee0e805ec7a6fa6f5f67efb41c78f. 2024-11-23T13:21:05,309 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1732368064871.ee2ee0e805ec7a6fa6f5f67efb41c78f. 2024-11-23T13:21:05,309 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for ee2ee0e805ec7a6fa6f5f67efb41c78f: 2024-11-23T13:21:05,312 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T13:21:05,319 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1732368064871.ee2ee0e805ec7a6fa6f5f67efb41c78f.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1732368065313"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732368065313"}]},"ts":"1732368065313"} 2024-11-23T13:21:05,342 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-23T13:21:05,344 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T13:21:05,347 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732368065344"}]},"ts":"1732368065344"} 2024-11-23T13:21:05,351 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-11-23T13:21:05,358 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=ee2ee0e805ec7a6fa6f5f67efb41c78f, ASSIGN}] 2024-11-23T13:21:05,360 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=ee2ee0e805ec7a6fa6f5f67efb41c78f, ASSIGN 2024-11-23T13:21:05,361 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=ee2ee0e805ec7a6fa6f5f67efb41c78f, ASSIGN; state=OFFLINE, location=ba2e440802a7,33173,1732368061317; forceNewPlan=false, retain=false 2024-11-23T13:21:05,512 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=ee2ee0e805ec7a6fa6f5f67efb41c78f, regionState=OPENING, regionLocation=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:05,516 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure ee2ee0e805ec7a6fa6f5f67efb41c78f, server=ba2e440802a7,33173,1732368061317}] 2024-11-23T13:21:05,670 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:05,676 INFO [RS_OPEN_PRIORITY_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open 
hbase:namespace,,1732368064871.ee2ee0e805ec7a6fa6f5f67efb41c78f. 2024-11-23T13:21:05,676 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => ee2ee0e805ec7a6fa6f5f67efb41c78f, NAME => 'hbase:namespace,,1732368064871.ee2ee0e805ec7a6fa6f5f67efb41c78f.', STARTKEY => '', ENDKEY => ''} 2024-11-23T13:21:05,677 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace ee2ee0e805ec7a6fa6f5f67efb41c78f 2024-11-23T13:21:05,677 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1732368064871.ee2ee0e805ec7a6fa6f5f67efb41c78f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T13:21:05,677 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for ee2ee0e805ec7a6fa6f5f67efb41c78f 2024-11-23T13:21:05,677 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for ee2ee0e805ec7a6fa6f5f67efb41c78f 2024-11-23T13:21:05,681 INFO [StoreOpener-ee2ee0e805ec7a6fa6f5f67efb41c78f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region ee2ee0e805ec7a6fa6f5f67efb41c78f 2024-11-23T13:21:05,683 INFO [StoreOpener-ee2ee0e805ec7a6fa6f5f67efb41c78f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ee2ee0e805ec7a6fa6f5f67efb41c78f columnFamilyName info 2024-11-23T13:21:05,683 DEBUG [StoreOpener-ee2ee0e805ec7a6fa6f5f67efb41c78f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:05,684 INFO [StoreOpener-ee2ee0e805ec7a6fa6f5f67efb41c78f-1 {}] regionserver.HStore(327): Store=ee2ee0e805ec7a6fa6f5f67efb41c78f/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T13:21:05,686 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/hbase/namespace/ee2ee0e805ec7a6fa6f5f67efb41c78f 2024-11-23T13:21:05,686 DEBUG 
[RS_OPEN_PRIORITY_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/hbase/namespace/ee2ee0e805ec7a6fa6f5f67efb41c78f 2024-11-23T13:21:05,690 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for ee2ee0e805ec7a6fa6f5f67efb41c78f 2024-11-23T13:21:05,693 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/hbase/namespace/ee2ee0e805ec7a6fa6f5f67efb41c78f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T13:21:05,694 INFO [RS_OPEN_PRIORITY_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened ee2ee0e805ec7a6fa6f5f67efb41c78f; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63875291, jitterRate=-0.04818399250507355}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T13:21:05,696 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for ee2ee0e805ec7a6fa6f5f67efb41c78f: 2024-11-23T13:21:05,698 INFO [RS_OPEN_PRIORITY_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1732368064871.ee2ee0e805ec7a6fa6f5f67efb41c78f., pid=6, masterSystemTime=1732368065670 2024-11-23T13:21:05,701 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1732368064871.ee2ee0e805ec7a6fa6f5f67efb41c78f. 2024-11-23T13:21:05,701 INFO [RS_OPEN_PRIORITY_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1732368064871.ee2ee0e805ec7a6fa6f5f67efb41c78f. 
2024-11-23T13:21:05,702 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=ee2ee0e805ec7a6fa6f5f67efb41c78f, regionState=OPEN, openSeqNum=2, regionLocation=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:05,710 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-11-23T13:21:05,711 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure ee2ee0e805ec7a6fa6f5f67efb41c78f, server=ba2e440802a7,33173,1732368061317 in 190 msec 2024-11-23T13:21:05,713 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-11-23T13:21:05,713 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=ee2ee0e805ec7a6fa6f5f67efb41c78f, ASSIGN in 353 msec 2024-11-23T13:21:05,715 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T13:21:05,715 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732368065715"}]},"ts":"1732368065715"} 2024-11-23T13:21:05,718 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-11-23T13:21:05,722 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T13:21:05,725 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 850 msec 2024-11-23T13:21:05,781 DEBUG [master/ba2e440802a7:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:46617-0x100248a866f0000, quorum=127.0.0.1:51875, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-11-23T13:21:05,784 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46617-0x100248a866f0000, quorum=127.0.0.1:51875, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-11-23T13:21:05,784 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33173-0x100248a866f0001, quorum=127.0.0.1:51875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T13:21:05,784 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46617-0x100248a866f0000, quorum=127.0.0.1:51875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T13:21:05,813 DEBUG [master/ba2e440802a7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-11-23T13:21:05,828 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46617-0x100248a866f0000, quorum=127.0.0.1:51875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-23T13:21:05,833 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 24 msec 2024-11-23T13:21:05,837 DEBUG [master/ba2e440802a7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-11-23T13:21:05,847 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46617-0x100248a866f0000, quorum=127.0.0.1:51875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-23T13:21:05,852 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 15 msec 2024-11-23T13:21:05,863 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46617-0x100248a866f0000, quorum=127.0.0.1:51875, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-11-23T13:21:05,866 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46617-0x100248a866f0000, quorum=127.0.0.1:51875, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-11-23T13:21:05,866 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 4.475sec 2024-11-23T13:21:05,867 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-23T13:21:05,868 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-23T13:21:05,869 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-23T13:21:05,870 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-23T13:21:05,870 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-23T13:21:05,871 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ba2e440802a7,46617,1732368060590-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-23T13:21:05,871 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ba2e440802a7,46617,1732368060590-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-23T13:21:05,877 DEBUG [master/ba2e440802a7:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-11-23T13:21:05,878 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-23T13:21:05,879 INFO [master/ba2e440802a7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ba2e440802a7,46617,1732368060590-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-23T13:21:05,971 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7e541e88 to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5e83c466 2024-11-23T13:21:05,971 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-11-23T13:21:05,979 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@305a704f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:21:05,983 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-23T13:21:05,983 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-23T13:21:05,993 DEBUG [hconnection-0x68773b0e-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:21:06,000 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52600, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:21:06,009 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=ba2e440802a7,46617,1732368060590 2024-11-23T13:21:06,024 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMixedAtomicity Thread=219, OpenFileDescriptor=444, MaxFileDescriptor=1048576, SystemLoadAverage=197, ProcessCount=11, AvailableMemoryMB=4437 2024-11-23T13:21:06,035 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-23T13:21:06,037 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40206, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-23T13:21:06,066 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-11-23T13:21:06,071 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T13:21:06,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-23T13:21:06,076 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T13:21:06,077 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 9 2024-11-23T13:21:06,077 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:06,079 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T13:21:06,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-23T13:21:06,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741837_1013 (size=960) 2024-11-23T13:21:06,106 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7 2024-11-23T13:21:06,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741838_1014 (size=53) 2024-11-23T13:21:06,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-23T13:21:06,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-23T13:21:06,517 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T13:21:06,518 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 519df349e6147d27e7c8246089c4409f, disabling compactions & flushes 2024-11-23T13:21:06,518 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:06,518 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:06,518 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. after waiting 0 ms 2024-11-23T13:21:06,518 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:06,518 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:06,518 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:06,520 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T13:21:06,521 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732368066521"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732368066521"}]},"ts":"1732368066521"} 2024-11-23T13:21:06,524 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
2024-11-23T13:21:06,526 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T13:21:06,526 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732368066526"}]},"ts":"1732368066526"} 2024-11-23T13:21:06,528 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-23T13:21:06,534 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=519df349e6147d27e7c8246089c4409f, ASSIGN}] 2024-11-23T13:21:06,536 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=519df349e6147d27e7c8246089c4409f, ASSIGN 2024-11-23T13:21:06,537 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=519df349e6147d27e7c8246089c4409f, ASSIGN; state=OFFLINE, location=ba2e440802a7,33173,1732368061317; forceNewPlan=false, retain=false 2024-11-23T13:21:06,688 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=519df349e6147d27e7c8246089c4409f, regionState=OPENING, regionLocation=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:06,691 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317}] 2024-11-23T13:21:06,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-23T13:21:06,845 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:06,851 INFO [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 
2024-11-23T13:21:06,851 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} 2024-11-23T13:21:06,851 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 519df349e6147d27e7c8246089c4409f 2024-11-23T13:21:06,852 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T13:21:06,852 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 519df349e6147d27e7c8246089c4409f 2024-11-23T13:21:06,852 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 519df349e6147d27e7c8246089c4409f 2024-11-23T13:21:06,854 INFO [StoreOpener-519df349e6147d27e7c8246089c4409f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 519df349e6147d27e7c8246089c4409f 2024-11-23T13:21:06,857 INFO [StoreOpener-519df349e6147d27e7c8246089c4409f-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-23T13:21:06,857 INFO [StoreOpener-519df349e6147d27e7c8246089c4409f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 519df349e6147d27e7c8246089c4409f columnFamilyName A 2024-11-23T13:21:06,857 DEBUG [StoreOpener-519df349e6147d27e7c8246089c4409f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:06,858 INFO [StoreOpener-519df349e6147d27e7c8246089c4409f-1 {}] regionserver.HStore(327): Store=519df349e6147d27e7c8246089c4409f/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T13:21:06,859 INFO [StoreOpener-519df349e6147d27e7c8246089c4409f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 519df349e6147d27e7c8246089c4409f 2024-11-23T13:21:06,860 INFO [StoreOpener-519df349e6147d27e7c8246089c4409f-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-23T13:21:06,861 INFO [StoreOpener-519df349e6147d27e7c8246089c4409f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 519df349e6147d27e7c8246089c4409f columnFamilyName B 2024-11-23T13:21:06,861 DEBUG [StoreOpener-519df349e6147d27e7c8246089c4409f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:06,862 INFO [StoreOpener-519df349e6147d27e7c8246089c4409f-1 {}] regionserver.HStore(327): Store=519df349e6147d27e7c8246089c4409f/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T13:21:06,862 INFO [StoreOpener-519df349e6147d27e7c8246089c4409f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 519df349e6147d27e7c8246089c4409f 2024-11-23T13:21:06,864 INFO [StoreOpener-519df349e6147d27e7c8246089c4409f-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-23T13:21:06,864 INFO [StoreOpener-519df349e6147d27e7c8246089c4409f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 519df349e6147d27e7c8246089c4409f columnFamilyName C 2024-11-23T13:21:06,864 DEBUG [StoreOpener-519df349e6147d27e7c8246089c4409f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:06,865 INFO [StoreOpener-519df349e6147d27e7c8246089c4409f-1 {}] regionserver.HStore(327): Store=519df349e6147d27e7c8246089c4409f/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T13:21:06,865 INFO [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:06,867 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f 2024-11-23T13:21:06,868 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f 2024-11-23T13:21:06,871 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-23T13:21:06,874 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 519df349e6147d27e7c8246089c4409f 2024-11-23T13:21:06,878 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T13:21:06,879 INFO [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 519df349e6147d27e7c8246089c4409f; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60553635, jitterRate=-0.09768052399158478}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T13:21:06,881 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:06,882 INFO [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f., pid=11, masterSystemTime=1732368066845 2024-11-23T13:21:06,885 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:06,885 INFO [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 
2024-11-23T13:21:06,886 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=519df349e6147d27e7c8246089c4409f, regionState=OPEN, openSeqNum=2, regionLocation=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:06,892 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-11-23T13:21:06,892 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 in 198 msec 2024-11-23T13:21:06,895 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-11-23T13:21:06,895 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=519df349e6147d27e7c8246089c4409f, ASSIGN in 358 msec 2024-11-23T13:21:06,896 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T13:21:06,896 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732368066896"}]},"ts":"1732368066896"} 2024-11-23T13:21:06,899 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-23T13:21:06,903 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T13:21:06,905 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 832 msec 2024-11-23T13:21:07,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-23T13:21:07,201 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 9 completed 2024-11-23T13:21:07,205 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1f6e36fe to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@e98ea32 2024-11-23T13:21:07,210 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b9fcedf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:21:07,212 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:21:07,214 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52612, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:21:07,217 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-23T13:21:07,219 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40222, version=2.7.0-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-23T13:21:07,225 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6f343a4d to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@12885408 2024-11-23T13:21:07,229 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9bd0964, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:21:07,230 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x22cb07dd to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@72b32f98 2024-11-23T13:21:07,234 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18cb251d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:21:07,235 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x478bae6b to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4977266 2024-11-23T13:21:07,238 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@45b55c24, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:21:07,239 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5400112e to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6bbb5d8a 2024-11-23T13:21:07,243 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e52b42a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:21:07,244 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x38766d64 to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@18603bb9 2024-11-23T13:21:07,248 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3883f7b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:21:07,249 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x295cb1ac to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@72e97e4b 2024-11-23T13:21:07,255 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@12a1285d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:21:07,256 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x70267494 to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@490457fd 2024-11-23T13:21:07,260 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@527c6d40, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:21:07,261 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1d2a8e08 to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2c8de680 2024-11-23T13:21:07,266 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47fe2fa7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:21:07,267 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2c915d17 to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6f6b07e3 2024-11-23T13:21:07,271 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@595e9ebe, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:21:07,275 DEBUG [hconnection-0x778c0741-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:21:07,275 DEBUG [hconnection-0x5dc12106-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:21:07,276 DEBUG [hconnection-0x1c8a5105-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:21:07,277 DEBUG [hconnection-0x4b800cbf-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:21:07,278 DEBUG [hconnection-0x6313e8f1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:21:07,279 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52614, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:21:07,280 DEBUG [hconnection-0x4ee426b5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:21:07,280 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52620, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:21:07,280 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52628, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins 
(auth:SIMPLE), service=ClientService 2024-11-23T13:21:07,281 DEBUG [hconnection-0x6ee6832c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:21:07,281 DEBUG [hconnection-0x79bb3058-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:21:07,283 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T13:21:07,287 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52652, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:21:07,288 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52694, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:21:07,288 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52668, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:21:07,289 DEBUG [hconnection-0x74328c80-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:21:07,289 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52638, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:21:07,289 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52684, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:21:07,292 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52702, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:21:07,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees 2024-11-23T13:21:07,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-23T13:21:07,303 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T13:21:07,305 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T13:21:07,307 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T13:21:07,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 519df349e6147d27e7c8246089c4409f 2024-11-23T13:21:07,357 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 519df349e6147d27e7c8246089c4409f 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-23T13:21:07,365 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=A 2024-11-23T13:21:07,366 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:07,366 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=B 2024-11-23T13:21:07,367 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:07,367 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=C 2024-11-23T13:21:07,367 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:07,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-23T13:21:07,455 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/bd9798d3a6af44f9b2258a8de63909a5 is 50, key is test_row_0/A:col10/1732368067324/Put/seqid=0 2024-11-23T13:21:07,489 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:07,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368127481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:07,492 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:07,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1732368127487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:07,493 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:07,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52702 deadline: 1732368127487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:07,501 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:07,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52684 deadline: 1732368127490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:07,503 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:07,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52652 deadline: 1732368127493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:07,510 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:07,512 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-23T13:21:07,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:07,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. as already flushing 2024-11-23T13:21:07,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:07,516 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:07,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:07,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741839_1015 (size=12001) 2024-11-23T13:21:07,523 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/bd9798d3a6af44f9b2258a8de63909a5 2024-11-23T13:21:07,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:07,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-23T13:21:07,630 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/522d936688ed4a6cb83fffc27b67cd19 is 50, key is test_row_0/B:col10/1732368067324/Put/seqid=0 2024-11-23T13:21:07,639 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:07,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52652 deadline: 1732368127638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:07,641 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:07,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52684 deadline: 1732368127638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:07,642 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:07,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52702 deadline: 1732368127639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:07,643 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:07,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368127639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:07,645 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:07,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1732368127639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:07,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741840_1016 (size=12001) 2024-11-23T13:21:07,658 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/522d936688ed4a6cb83fffc27b67cd19 2024-11-23T13:21:07,686 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:07,687 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-23T13:21:07,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:07,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. as already flushing 2024-11-23T13:21:07,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 
2024-11-23T13:21:07,696 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:07,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:07,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:07,706 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/43615e46523f46bcb56397c7f2f2a0ce is 50, key is test_row_0/C:col10/1732368067324/Put/seqid=0 2024-11-23T13:21:07,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741841_1017 (size=12001) 2024-11-23T13:21:07,725 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/43615e46523f46bcb56397c7f2f2a0ce 2024-11-23T13:21:07,739 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/bd9798d3a6af44f9b2258a8de63909a5 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/bd9798d3a6af44f9b2258a8de63909a5 2024-11-23T13:21:07,753 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/bd9798d3a6af44f9b2258a8de63909a5, entries=150, sequenceid=12, filesize=11.7 K 2024-11-23T13:21:07,758 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(1000): StoreScanner already closing. 
There is no need to updateReaders 2024-11-23T13:21:07,761 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/522d936688ed4a6cb83fffc27b67cd19 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/522d936688ed4a6cb83fffc27b67cd19 2024-11-23T13:21:07,778 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/522d936688ed4a6cb83fffc27b67cd19, entries=150, sequenceid=12, filesize=11.7 K 2024-11-23T13:21:07,780 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/43615e46523f46bcb56397c7f2f2a0ce as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/43615e46523f46bcb56397c7f2f2a0ce 2024-11-23T13:21:07,796 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/43615e46523f46bcb56397c7f2f2a0ce, entries=150, sequenceid=12, filesize=11.7 K 2024-11-23T13:21:07,800 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 519df349e6147d27e7c8246089c4409f in 443ms, sequenceid=12, compaction requested=false 2024-11-23T13:21:07,802 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-23T13:21:07,805 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:07,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 519df349e6147d27e7c8246089c4409f 2024-11-23T13:21:07,851 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 519df349e6147d27e7c8246089c4409f 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-23T13:21:07,851 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=A 2024-11-23T13:21:07,851 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:07,851 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=B 2024-11-23T13:21:07,851 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:07,851 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:07,851 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=C 2024-11-23T13:21:07,852 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping 
pipeline suffix; before=1, new segment=null 2024-11-23T13:21:07,852 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-23T13:21:07,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:07,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. as already flushing 2024-11-23T13:21:07,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:07,875 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:07,876 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/f0228697838f4b528261e823f6707d8a is 50, key is test_row_0/A:col10/1732368067847/Put/seqid=0 2024-11-23T13:21:07,878 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:07,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52702 deadline: 1732368127862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:07,880 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:07,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368127865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:07,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:07,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:07,890 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:07,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1732368127876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:07,892 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:07,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52652 deadline: 1732368127879, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:07,893 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:07,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52684 deadline: 1732368127879, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:07,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741842_1018 (size=14341) 2024-11-23T13:21:07,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-23T13:21:07,984 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:07,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52702 deadline: 1732368127982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:07,990 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:07,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368127986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:08,007 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:08,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1732368127994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:08,008 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:08,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52684 deadline: 1732368127997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:08,009 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:08,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52652 deadline: 1732368127997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:08,042 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:08,043 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-23T13:21:08,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:08,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. as already flushing 2024-11-23T13:21:08,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:08,043 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:21:08,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:08,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:08,190 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:08,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52702 deadline: 1732368128189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:08,198 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:08,198 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:08,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368128196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:08,200 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-23T13:21:08,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:08,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. as already flushing 2024-11-23T13:21:08,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:08,200 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:21:08,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:08,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:08,216 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:08,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1732368128212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:08,217 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:08,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52684 deadline: 1732368128212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:08,219 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:08,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52652 deadline: 1732368128214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:08,306 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/f0228697838f4b528261e823f6707d8a 2024-11-23T13:21:08,328 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/efd9da8ea1ab4d62ac34b163a41cd624 is 50, key is test_row_0/B:col10/1732368067847/Put/seqid=0 2024-11-23T13:21:08,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741843_1019 (size=12001) 2024-11-23T13:21:08,354 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/efd9da8ea1ab4d62ac34b163a41cd624 2024-11-23T13:21:08,355 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:08,355 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-23T13:21:08,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:08,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. as already flushing 2024-11-23T13:21:08,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 
2024-11-23T13:21:08,357 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:08,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:08,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:08,396 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/f2a551663d8e47e9addf9db8900859d8 is 50, key is test_row_0/C:col10/1732368067847/Put/seqid=0 2024-11-23T13:21:08,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-23T13:21:08,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741844_1020 (size=12001) 2024-11-23T13:21:08,424 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/f2a551663d8e47e9addf9db8900859d8 2024-11-23T13:21:08,439 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/f0228697838f4b528261e823f6707d8a as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/f0228697838f4b528261e823f6707d8a 2024-11-23T13:21:08,453 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/f0228697838f4b528261e823f6707d8a, entries=200, sequenceid=39, filesize=14.0 K 2024-11-23T13:21:08,455 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/efd9da8ea1ab4d62ac34b163a41cd624 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/efd9da8ea1ab4d62ac34b163a41cd624 2024-11-23T13:21:08,467 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/efd9da8ea1ab4d62ac34b163a41cd624, entries=150, sequenceid=39, filesize=11.7 K 2024-11-23T13:21:08,470 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/f2a551663d8e47e9addf9db8900859d8 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/f2a551663d8e47e9addf9db8900859d8 2024-11-23T13:21:08,481 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/f2a551663d8e47e9addf9db8900859d8, entries=150, sequenceid=39, filesize=11.7 K 2024-11-23T13:21:08,483 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 519df349e6147d27e7c8246089c4409f in 633ms, sequenceid=39, compaction requested=false 2024-11-23T13:21:08,483 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:08,511 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:08,511 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-23T13:21:08,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 
2024-11-23T13:21:08,512 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing 519df349e6147d27e7c8246089c4409f 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-23T13:21:08,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=A 2024-11-23T13:21:08,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:08,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=B 2024-11-23T13:21:08,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:08,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=C 2024-11-23T13:21:08,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:08,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 519df349e6147d27e7c8246089c4409f 2024-11-23T13:21:08,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/1a9e4aa2578547d0a827c6df99d32698 is 50, key is test_row_0/A:col10/1732368068497/Put/seqid=0 2024-11-23T13:21:08,524 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. as already flushing 2024-11-23T13:21:08,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741845_1021 (size=12001) 2024-11-23T13:21:08,595 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:08,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52702 deadline: 1732368128590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:08,599 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:08,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368128595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:08,601 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:08,601 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:08,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1732368128596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:08,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52684 deadline: 1732368128595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:08,603 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:08,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52652 deadline: 1732368128600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:08,701 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:08,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52702 deadline: 1732368128700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:08,707 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:08,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368128705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:08,708 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:08,708 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:08,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52684 deadline: 1732368128705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:08,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52652 deadline: 1732368128706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:08,709 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:08,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1732368128708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:08,908 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:08,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52702 deadline: 1732368128907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:08,913 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:08,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52652 deadline: 1732368128912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:08,913 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:08,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368128912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:08,915 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:08,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1732368128914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:08,914 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:08,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52684 deadline: 1732368128913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:08,960 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/1a9e4aa2578547d0a827c6df99d32698 2024-11-23T13:21:08,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/8128cc2545e44515abd785d04b76ecd7 is 50, key is test_row_0/B:col10/1732368068497/Put/seqid=0 2024-11-23T13:21:09,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741846_1022 (size=12001) 2024-11-23T13:21:09,022 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/8128cc2545e44515abd785d04b76ecd7 2024-11-23T13:21:09,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/2066f564e2844f31be87412a81ffcae9 is 50, key is 
test_row_0/C:col10/1732368068497/Put/seqid=0 2024-11-23T13:21:09,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741847_1023 (size=12001) 2024-11-23T13:21:09,103 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/2066f564e2844f31be87412a81ffcae9 2024-11-23T13:21:09,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/1a9e4aa2578547d0a827c6df99d32698 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/1a9e4aa2578547d0a827c6df99d32698 2024-11-23T13:21:09,137 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/1a9e4aa2578547d0a827c6df99d32698, entries=150, sequenceid=49, filesize=11.7 K 2024-11-23T13:21:09,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/8128cc2545e44515abd785d04b76ecd7 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/8128cc2545e44515abd785d04b76ecd7 2024-11-23T13:21:09,155 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/8128cc2545e44515abd785d04b76ecd7, entries=150, sequenceid=49, filesize=11.7 K 2024-11-23T13:21:09,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/2066f564e2844f31be87412a81ffcae9 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/2066f564e2844f31be87412a81ffcae9 2024-11-23T13:21:09,173 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/2066f564e2844f31be87412a81ffcae9, entries=150, sequenceid=49, filesize=11.7 K 2024-11-23T13:21:09,179 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 
{event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=161.02 KB/164880 for 519df349e6147d27e7c8246089c4409f in 666ms, sequenceid=49, compaction requested=true 2024-11-23T13:21:09,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:09,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:09,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-11-23T13:21:09,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4106): Remote procedure done, pid=13 2024-11-23T13:21:09,186 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-11-23T13:21:09,186 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8750 sec 2024-11-23T13:21:09,189 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees in 1.9010 sec 2024-11-23T13:21:09,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 519df349e6147d27e7c8246089c4409f 2024-11-23T13:21:09,220 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 519df349e6147d27e7c8246089c4409f 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-23T13:21:09,224 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=A 2024-11-23T13:21:09,224 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:09,224 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=B 2024-11-23T13:21:09,224 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:09,224 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=C 2024-11-23T13:21:09,224 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:09,234 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/69da27dfc1f041789f1d67ebecb85e2c is 50, key is test_row_0/A:col10/1732368069220/Put/seqid=0 2024-11-23T13:21:09,236 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:09,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368129228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:09,237 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:09,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1732368129231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:09,238 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:09,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52684 deadline: 1732368129232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:09,241 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:09,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52702 deadline: 1732368129237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:09,242 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:09,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52652 deadline: 1732368129238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:09,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741848_1024 (size=14341) 2024-11-23T13:21:09,289 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/69da27dfc1f041789f1d67ebecb85e2c 2024-11-23T13:21:09,315 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/8d958cd0999c403093a120161e3af535 is 50, key is test_row_0/B:col10/1732368069220/Put/seqid=0 2024-11-23T13:21:09,346 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:09,346 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:09,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368129340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:09,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1732368129341, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:09,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741849_1025 (size=12001) 2024-11-23T13:21:09,353 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/8d958cd0999c403093a120161e3af535 2024-11-23T13:21:09,353 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:09,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52652 deadline: 1732368129347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:09,354 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:09,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52702 deadline: 1732368129350, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:09,355 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:09,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52684 deadline: 1732368129351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:09,375 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/9650542811e5424a98acf163e98bb3d4 is 50, key is test_row_0/C:col10/1732368069220/Put/seqid=0 2024-11-23T13:21:09,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741850_1026 (size=12001) 2024-11-23T13:21:09,407 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/9650542811e5424a98acf163e98bb3d4 2024-11-23T13:21:09,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-23T13:21:09,414 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 12 completed 2024-11-23T13:21:09,418 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T13:21:09,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees 2024-11-23T13:21:09,422 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T13:21:09,423 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute 
state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T13:21:09,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-23T13:21:09,423 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T13:21:09,429 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/69da27dfc1f041789f1d67ebecb85e2c as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/69da27dfc1f041789f1d67ebecb85e2c 2024-11-23T13:21:09,444 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/69da27dfc1f041789f1d67ebecb85e2c, entries=200, sequenceid=78, filesize=14.0 K 2024-11-23T13:21:09,446 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/8d958cd0999c403093a120161e3af535 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/8d958cd0999c403093a120161e3af535 2024-11-23T13:21:09,458 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/8d958cd0999c403093a120161e3af535, entries=150, sequenceid=78, filesize=11.7 K 2024-11-23T13:21:09,461 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/9650542811e5424a98acf163e98bb3d4 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/9650542811e5424a98acf163e98bb3d4 2024-11-23T13:21:09,477 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/9650542811e5424a98acf163e98bb3d4, entries=150, sequenceid=78, filesize=11.7 K 2024-11-23T13:21:09,479 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=33.54 KB/34350 for 519df349e6147d27e7c8246089c4409f in 258ms, sequenceid=78, compaction requested=true 2024-11-23T13:21:09,479 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:09,480 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-23T13:21:09,482 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 
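Note on the repeated RegionTooBusyException records above: the "Over memstore limit=512.0 K" threshold is not a fixed constant; it is the region memstore flush size multiplied by the memstore block multiplier, both standard HBase settings that this test presumably lowers from their defaults. A minimal sketch of how that limit is derived (the literal values below are illustrative assumptions, not values read from this run):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch only: how the "Over memstore limit=512.0 K" blocking threshold is computed.
// blocking limit = hbase.hregion.memstore.flush.size * hbase.hregion.memstore.block.multiplier
public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Illustrative: a 128 KB flush size with the default multiplier of 4
    // would yield the 512 KB blocking limit seen in this log.
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4);
    // Puts against a region whose memstore exceeds this product are rejected with
    // RegionTooBusyException until a flush shrinks the memstore again.
    System.out.println("blocking limit = " + (flushSize * multiplier) + " bytes");
  }
}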
2024-11-23T13:21:09,483 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 519df349e6147d27e7c8246089c4409f:A, priority=-2147483648, current under compaction store size is 1
2024-11-23T13:21:09,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-23T13:21:09,484 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking
2024-11-23T13:21:09,484 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking
2024-11-23T13:21:09,485 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 519df349e6147d27e7c8246089c4409f:B, priority=-2147483648, current under compaction store size is 2
2024-11-23T13:21:09,485 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-23T13:21:09,485 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 519df349e6147d27e7c8246089c4409f:C, priority=-2147483648, current under compaction store size is 3
2024-11-23T13:21:09,485 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-23T13:21:09,489 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio
2024-11-23T13:21:09,491 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 519df349e6147d27e7c8246089c4409f/B is initiating minor compaction (all files)
2024-11-23T13:21:09,491 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 519df349e6147d27e7c8246089c4409f/B in TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.
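The compaction-selection records above ("4 store files, 0 compacting, 4 eligible, 16 blocking") are governed by the standard store-file thresholds; a minimal sketch of the configuration keys involved (the defaults shown are the usual stock values, not values read from this run) before the file list that follows:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch only: the knobs behind "4 eligible, 16 blocking" in the selection records.
public class CompactionThresholds {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Minimum number of eligible store files before a minor compaction is considered.
    int minFiles = conf.getInt("hbase.hstore.compaction.min", 3);
    // Store-file count at which further flushes are delayed until compaction catches up
    // (the "16 blocking" figure in the log).
    int blockingFiles = conf.getInt("hbase.hstore.blockingStoreFiles", 16);
    System.out.println("compaction.min=" + minFiles + ", blockingStoreFiles=" + blockingFiles);
  }
}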
2024-11-23T13:21:09,492 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/522d936688ed4a6cb83fffc27b67cd19, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/efd9da8ea1ab4d62ac34b163a41cd624, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/8128cc2545e44515abd785d04b76ecd7, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/8d958cd0999c403093a120161e3af535] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp, totalSize=46.9 K
2024-11-23T13:21:09,495 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 522d936688ed4a6cb83fffc27b67cd19, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1732368067324
2024-11-23T13:21:09,495 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 52684 starting at candidate #0 after considering 3 permutations with 3 in ratio
2024-11-23T13:21:09,495 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): 519df349e6147d27e7c8246089c4409f/A is initiating minor compaction (all files)
2024-11-23T13:21:09,495 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 519df349e6147d27e7c8246089c4409f/A in TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.
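For context on the server-side RegionTooBusyException records that dominate this stretch of the log: from a writer's perspective those rejections surface as retriable IOExceptions. A minimal client-side sketch is below; the table, row, family, and qualifier names are taken from the log, but the retry loop itself is purely illustrative (the stock HBase client already retries such failures internally, and the real test drives writes through its own harness):

import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch only: a writer backing off when the region memstore is over its blocking limit.
public class BusyRegionWriter {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);
          break; // write accepted
        } catch (RegionTooBusyException e) {
          // Region memstore is over its blocking limit; back off and let the flush finish.
          Thread.sleep(200L * (attempt + 1));
        }
      }
    }
  }
}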
2024-11-23T13:21:09,495 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/bd9798d3a6af44f9b2258a8de63909a5, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/f0228697838f4b528261e823f6707d8a, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/1a9e4aa2578547d0a827c6df99d32698, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/69da27dfc1f041789f1d67ebecb85e2c] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp, totalSize=51.4 K 2024-11-23T13:21:09,496 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting efd9da8ea1ab4d62ac34b163a41cd624, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1732368067485 2024-11-23T13:21:09,497 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 8128cc2545e44515abd785d04b76ecd7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1732368067854 2024-11-23T13:21:09,497 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting bd9798d3a6af44f9b2258a8de63909a5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1732368067324 2024-11-23T13:21:09,498 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 8d958cd0999c403093a120161e3af535, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732368068592 2024-11-23T13:21:09,499 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting f0228697838f4b528261e823f6707d8a, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1732368067475 2024-11-23T13:21:09,500 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1a9e4aa2578547d0a827c6df99d32698, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1732368067854 2024-11-23T13:21:09,501 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 69da27dfc1f041789f1d67ebecb85e2c, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732368068592 2024-11-23T13:21:09,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-23T13:21:09,543 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 519df349e6147d27e7c8246089c4409f#A#compaction#12 average throughput is 0.60 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:09,544 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/f788875c844044b5a99e6c603cec027d is 50, key is test_row_0/A:col10/1732368069220/Put/seqid=0 2024-11-23T13:21:09,555 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 519df349e6147d27e7c8246089c4409f#B#compaction#13 average throughput is 0.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:09,556 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/a1ee73a0ae82404e9c09576fc4469959 is 50, key is test_row_0/B:col10/1732368069220/Put/seqid=0 2024-11-23T13:21:09,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 519df349e6147d27e7c8246089c4409f 2024-11-23T13:21:09,565 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 519df349e6147d27e7c8246089c4409f 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-23T13:21:09,566 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=A 2024-11-23T13:21:09,566 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:09,566 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=B 2024-11-23T13:21:09,567 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:09,567 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=C 2024-11-23T13:21:09,567 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:09,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741851_1027 (size=12139) 2024-11-23T13:21:09,577 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:09,578 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-23T13:21:09,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:09,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 
as already flushing 2024-11-23T13:21:09,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:09,578 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:09,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:09,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:09,591 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/f788875c844044b5a99e6c603cec027d as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/f788875c844044b5a99e6c603cec027d 2024-11-23T13:21:09,609 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/0aee31dfccc44711a0b0e8c5238b0951 is 50, key is test_row_0/A:col10/1732368069235/Put/seqid=0 2024-11-23T13:21:09,620 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 519df349e6147d27e7c8246089c4409f/A of 519df349e6147d27e7c8246089c4409f into f788875c844044b5a99e6c603cec027d(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
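The pid=15 entries around here show a master-driven flush being dispatched to the region server and rejected with "Unable to complete flush ... as already flushing", because the MemStoreFlusher already has a flush of the same region in flight; the master keeps re-dispatching the callable until it can run. Such a flush is typically requested through the client Admin API. A minimal sketch, assuming a default client configuration (only the table name is taken from the log; everything else is an assumption):

    // Hypothetical driver that asks the cluster to flush the test table.
    // The server-side FlushRegionCallable seen in the log is the per-region
    // handler that eventually services this kind of request.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public final class FlushRequestSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Requests a flush of every region of the table; if a region is
                // already flushing, the procedure is retried on the server side.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }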
2024-11-23T13:21:09,620 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:09,620 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f., storeName=519df349e6147d27e7c8246089c4409f/A, priority=12, startTime=1732368069480; duration=0sec 2024-11-23T13:21:09,620 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:21:09,621 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 519df349e6147d27e7c8246089c4409f:A 2024-11-23T13:21:09,621 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T13:21:09,628 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T13:21:09,628 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): 519df349e6147d27e7c8246089c4409f/C is initiating minor compaction (all files) 2024-11-23T13:21:09,628 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 519df349e6147d27e7c8246089c4409f/C in TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:09,628 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/43615e46523f46bcb56397c7f2f2a0ce, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/f2a551663d8e47e9addf9db8900859d8, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/2066f564e2844f31be87412a81ffcae9, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/9650542811e5424a98acf163e98bb3d4] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp, totalSize=46.9 K 2024-11-23T13:21:09,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741852_1028 (size=12139) 2024-11-23T13:21:09,634 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 43615e46523f46bcb56397c7f2f2a0ce, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1732368067324 2024-11-23T13:21:09,635 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting f2a551663d8e47e9addf9db8900859d8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1732368067485 2024-11-23T13:21:09,636 
DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2066f564e2844f31be87412a81ffcae9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1732368067854 2024-11-23T13:21:09,638 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9650542811e5424a98acf163e98bb3d4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732368068592 2024-11-23T13:21:09,646 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/a1ee73a0ae82404e9c09576fc4469959 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/a1ee73a0ae82404e9c09576fc4469959 2024-11-23T13:21:09,659 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 519df349e6147d27e7c8246089c4409f/B of 519df349e6147d27e7c8246089c4409f into a1ee73a0ae82404e9c09576fc4469959(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:21:09,659 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:09,659 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f., storeName=519df349e6147d27e7c8246089c4409f/B, priority=12, startTime=1732368069484; duration=0sec 2024-11-23T13:21:09,659 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:09,659 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 519df349e6147d27e7c8246089c4409f:B 2024-11-23T13:21:09,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741853_1029 (size=14337) 2024-11-23T13:21:09,669 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:09,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52702 deadline: 1732368129652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:09,669 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:09,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1732368129654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:09,670 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:09,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52652 deadline: 1732368129665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:09,682 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 519df349e6147d27e7c8246089c4409f#C#compaction#15 average throughput is 0.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:09,683 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/2137edd728e441929612224219781ee7 is 50, key is test_row_0/C:col10/1732368069220/Put/seqid=0 2024-11-23T13:21:09,683 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:09,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52684 deadline: 1732368129671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:09,685 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:09,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368129671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:09,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741854_1030 (size=12139) 2024-11-23T13:21:09,712 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/2137edd728e441929612224219781ee7 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/2137edd728e441929612224219781ee7 2024-11-23T13:21:09,725 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 519df349e6147d27e7c8246089c4409f/C of 519df349e6147d27e7c8246089c4409f into 2137edd728e441929612224219781ee7(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
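The RegionTooBusyException warnings above correspond to Mutate calls being rejected while the region's memstore is over its test-sized 512.0 K blocking limit; the standard HBase client retries such calls internally, backing off until the flush and compactions free up space. A hand-rolled sketch of the same retry idea, assuming an already open Table handle (the row, family, and qualifier names match the log's test_row_0/A:col10 keys; the value and the backoff parameters are assumptions):

    // Illustrative write loop that backs off when the region reports it is
    // too busy. In practice the HBase client performs this retrying itself.
    import java.io.IOException;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class BusyRegionRetrySketch {
        static void putWithBackoff(Table table) throws IOException, InterruptedException {
            Put put = new Put(Bytes.toBytes("test_row_0"))
                .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;
            for (int attempt = 0; attempt < 10; attempt++) {
                try {
                    table.put(put);
                    return;                  // write accepted
                } catch (RegionTooBusyException e) {
                    Thread.sleep(backoffMs); // memstore over limit; wait for the flush to catch up
                    backoffMs = Math.min(backoffMs * 2, 5_000);
                }
            }
            throw new IOException("region stayed too busy after 10 attempts");
        }
    }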
2024-11-23T13:21:09,725 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:09,726 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f., storeName=519df349e6147d27e7c8246089c4409f/C, priority=12, startTime=1732368069485; duration=0sec 2024-11-23T13:21:09,726 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:09,726 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 519df349e6147d27e7c8246089c4409f:C 2024-11-23T13:21:09,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-23T13:21:09,732 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:09,733 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-23T13:21:09,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:09,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. as already flushing 2024-11-23T13:21:09,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:09,733 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:21:09,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:09,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:09,775 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:09,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52702 deadline: 1732368129773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:09,777 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:09,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1732368129774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:09,778 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:09,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52652 deadline: 1732368129775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:09,788 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:09,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52684 deadline: 1732368129787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:09,789 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:09,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368129788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:09,887 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:09,887 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-23T13:21:09,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:09,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. as already flushing 2024-11-23T13:21:09,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:09,888 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
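The 512.0 K figure in the "Over memstore limit" messages is the per-region blocking size, i.e. the configured memstore flush size multiplied by the block multiplier; the test evidently uses a deliberately tiny flush size so that flushes and blocking happen constantly. A sketch of how that limit is derived from configuration, with the 128 K flush size and the multiplier of 4 used as assumed example values rather than values read from this log:

    // Illustrates how the per-region memstore blocking limit is derived.
    // Property names are the standard HBase ones; the example values are assumptions.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class MemstoreLimitSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);  // 128 K (example)
            conf.setLong("hbase.hregion.memstore.block.multiplier", 4);     // default multiplier

            long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
            long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4);
            long blockingLimit = flushSize * multiplier;                     // 512 K in this example

            System.out.println("writes block above " + (blockingLimit / 1024.0) + " K");
        }
    }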
2024-11-23T13:21:09,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:09,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:09,982 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:09,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52702 deadline: 1732368129980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:09,983 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:09,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1732368129981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:09,984 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:09,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52652 deadline: 1732368129984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:09,994 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:09,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52684 deadline: 1732368129991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:09,994 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:09,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368129992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:10,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-23T13:21:10,042 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:10,043 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-23T13:21:10,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:10,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. as already flushing 2024-11-23T13:21:10,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:10,043 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:21:10,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:10,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:10,063 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/0aee31dfccc44711a0b0e8c5238b0951 2024-11-23T13:21:10,091 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/0d1ae463c63048efac0b9733e32d65f0 is 50, key is test_row_0/B:col10/1732368069235/Put/seqid=0 2024-11-23T13:21:10,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741855_1031 (size=9657) 2024-11-23T13:21:10,104 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/0d1ae463c63048efac0b9733e32d65f0 2024-11-23T13:21:10,123 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/5d8e936c13cd42f096dc805317619629 is 50, key is test_row_0/C:col10/1732368069235/Put/seqid=0 2024-11-23T13:21:10,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741856_1032 (size=9657) 2024-11-23T13:21:10,197 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:10,198 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-23T13:21:10,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:10,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. as already flushing 2024-11-23T13:21:10,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 
2024-11-23T13:21:10,199 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:10,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:10,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:10,235 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-23T13:21:10,287 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:10,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52702 deadline: 1732368130286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:10,290 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:10,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52652 deadline: 1732368130288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:10,291 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:10,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1732368130288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:10,300 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:10,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52684 deadline: 1732368130299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:10,301 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:10,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368130299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:10,352 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:10,353 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-23T13:21:10,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:10,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. as already flushing 2024-11-23T13:21:10,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:10,354 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:10,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:10,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:10,507 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:10,508 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-23T13:21:10,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:10,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. as already flushing 2024-11-23T13:21:10,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:10,508 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:10,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:10,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:10,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-23T13:21:10,547 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/5d8e936c13cd42f096dc805317619629 2024-11-23T13:21:10,560 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/0aee31dfccc44711a0b0e8c5238b0951 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/0aee31dfccc44711a0b0e8c5238b0951 2024-11-23T13:21:10,570 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/0aee31dfccc44711a0b0e8c5238b0951, entries=200, sequenceid=89, filesize=14.0 K 2024-11-23T13:21:10,572 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/0d1ae463c63048efac0b9733e32d65f0 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/0d1ae463c63048efac0b9733e32d65f0 2024-11-23T13:21:10,584 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/0d1ae463c63048efac0b9733e32d65f0, entries=100, sequenceid=89, filesize=9.4 K 2024-11-23T13:21:10,586 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/5d8e936c13cd42f096dc805317619629 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/5d8e936c13cd42f096dc805317619629 2024-11-23T13:21:10,595 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/5d8e936c13cd42f096dc805317619629, entries=100, sequenceid=89, filesize=9.4 K 2024-11-23T13:21:10,598 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 519df349e6147d27e7c8246089c4409f in 1034ms, sequenceid=89, compaction requested=false 2024-11-23T13:21:10,599 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:10,664 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:10,665 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-23T13:21:10,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 
2024-11-23T13:21:10,666 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2837): Flushing 519df349e6147d27e7c8246089c4409f 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-23T13:21:10,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=A 2024-11-23T13:21:10,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:10,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=B 2024-11-23T13:21:10,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:10,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=C 2024-11-23T13:21:10,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:10,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/91f31174a6e8419abe51341066780093 is 50, key is test_row_0/A:col10/1732368069642/Put/seqid=0 2024-11-23T13:21:10,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741857_1033 (size=12001) 2024-11-23T13:21:10,708 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/91f31174a6e8419abe51341066780093 2024-11-23T13:21:10,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/ec792505ad954d329bc1854c9b391c9b is 50, key is test_row_0/B:col10/1732368069642/Put/seqid=0 2024-11-23T13:21:10,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741858_1034 (size=12001) 2024-11-23T13:21:10,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 519df349e6147d27e7c8246089c4409f 2024-11-23T13:21:10,797 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 
as already flushing 2024-11-23T13:21:10,808 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:10,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368130804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:10,810 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:10,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52702 deadline: 1732368130807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:10,810 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:10,810 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:10,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1732368130807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:10,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52652 deadline: 1732368130808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:10,812 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:10,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52684 deadline: 1732368130809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:10,915 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:10,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52702 deadline: 1732368130914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:10,916 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:10,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52652 deadline: 1732368130915, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:10,916 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:10,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1732368130916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:10,918 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:10,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52684 deadline: 1732368130917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:11,075 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-23T13:21:11,076 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-23T13:21:11,078 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-11-23T13:21:11,078 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-11-23T13:21:11,080 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-23T13:21:11,080 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-23T13:21:11,080 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-23T13:21:11,080 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-23T13:21:11,081 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-23T13:21:11,081 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-11-23T13:21:11,122 WARN 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:11,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52702 deadline: 1732368131119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:11,124 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:11,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52652 deadline: 1732368131120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:11,124 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:11,124 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:11,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1732368131121, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:11,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52684 deadline: 1732368131122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:11,152 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/ec792505ad954d329bc1854c9b391c9b 2024-11-23T13:21:11,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/4302726451d3429fa7a60fcef7f12aad is 50, key is test_row_0/C:col10/1732368069642/Put/seqid=0 2024-11-23T13:21:11,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741859_1035 (size=12001) 2024-11-23T13:21:11,430 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:11,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52702 deadline: 1732368131426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:11,431 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:11,431 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:11,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1732368131428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:11,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52652 deadline: 1732368131429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:11,431 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:11,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52684 deadline: 1732368131431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:11,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-23T13:21:11,600 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/4302726451d3429fa7a60fcef7f12aad 2024-11-23T13:21:11,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/91f31174a6e8419abe51341066780093 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/91f31174a6e8419abe51341066780093 2024-11-23T13:21:11,626 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/91f31174a6e8419abe51341066780093, entries=150, sequenceid=118, filesize=11.7 K 2024-11-23T13:21:11,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/ec792505ad954d329bc1854c9b391c9b as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/ec792505ad954d329bc1854c9b391c9b 2024-11-23T13:21:11,644 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/ec792505ad954d329bc1854c9b391c9b, entries=150, sequenceid=118, filesize=11.7 K 2024-11-23T13:21:11,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/4302726451d3429fa7a60fcef7f12aad as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/4302726451d3429fa7a60fcef7f12aad 2024-11-23T13:21:11,659 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/4302726451d3429fa7a60fcef7f12aad, entries=150, sequenceid=118, filesize=11.7 K 2024-11-23T13:21:11,661 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 519df349e6147d27e7c8246089c4409f in 995ms, sequenceid=118, compaction requested=true 2024-11-23T13:21:11,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2538): Flush status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:11,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:11,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-11-23T13:21:11,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4106): Remote procedure done, pid=15 2024-11-23T13:21:11,666 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14 2024-11-23T13:21:11,666 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2400 sec 2024-11-23T13:21:11,670 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees in 2.2500 sec 2024-11-23T13:21:11,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 519df349e6147d27e7c8246089c4409f 2024-11-23T13:21:11,819 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 519df349e6147d27e7c8246089c4409f 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-23T13:21:11,820 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=A 2024-11-23T13:21:11,820 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:11,820 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=B 2024-11-23T13:21:11,820 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:11,820 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=C 2024-11-23T13:21:11,820 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:11,834 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/49b8beff60174ec0bdb28955ebdabb50 is 50, key is test_row_0/A:col10/1732368070805/Put/seqid=0 2024-11-23T13:21:11,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741860_1036 (size=12051) 2024-11-23T13:21:11,853 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/49b8beff60174ec0bdb28955ebdabb50 2024-11-23T13:21:11,882 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/35f322c4fd6d4733bf38e53bdca328c1 is 50, key is test_row_0/B:col10/1732368070805/Put/seqid=0 2024-11-23T13:21:11,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741861_1037 (size=12051) 2024-11-23T13:21:11,907 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/35f322c4fd6d4733bf38e53bdca328c1 2024-11-23T13:21:11,930 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/bf39e173d63b43f9a8d320c01c1c2203 is 50, key is test_row_0/C:col10/1732368070805/Put/seqid=0 2024-11-23T13:21:11,932 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:11,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368131931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:11,937 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:11,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52702 deadline: 1732368131935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:11,938 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:11,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52684 deadline: 1732368131936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:11,939 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:11,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1732368131936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:11,941 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:11,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52652 deadline: 1732368131938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:11,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741862_1038 (size=12051) 2024-11-23T13:21:11,967 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/bf39e173d63b43f9a8d320c01c1c2203 2024-11-23T13:21:11,981 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/49b8beff60174ec0bdb28955ebdabb50 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/49b8beff60174ec0bdb28955ebdabb50 2024-11-23T13:21:11,991 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/49b8beff60174ec0bdb28955ebdabb50, entries=150, sequenceid=129, filesize=11.8 K 2024-11-23T13:21:11,994 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/35f322c4fd6d4733bf38e53bdca328c1 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/35f322c4fd6d4733bf38e53bdca328c1 2024-11-23T13:21:12,003 
INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/35f322c4fd6d4733bf38e53bdca328c1, entries=150, sequenceid=129, filesize=11.8 K 2024-11-23T13:21:12,005 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/bf39e173d63b43f9a8d320c01c1c2203 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/bf39e173d63b43f9a8d320c01c1c2203 2024-11-23T13:21:12,030 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/bf39e173d63b43f9a8d320c01c1c2203, entries=150, sequenceid=129, filesize=11.8 K 2024-11-23T13:21:12,031 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 519df349e6147d27e7c8246089c4409f in 212ms, sequenceid=129, compaction requested=true 2024-11-23T13:21:12,032 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:12,032 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 519df349e6147d27e7c8246089c4409f:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T13:21:12,032 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T13:21:12,032 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:12,032 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T13:21:12,033 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 519df349e6147d27e7c8246089c4409f:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T13:21:12,033 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:12,033 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 519df349e6147d27e7c8246089c4409f:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T13:21:12,033 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:21:12,036 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 45848 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T13:21:12,036 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] 
regionserver.HStore(1540): 519df349e6147d27e7c8246089c4409f/B is initiating minor compaction (all files) 2024-11-23T13:21:12,037 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 519df349e6147d27e7c8246089c4409f/B in TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:12,037 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/a1ee73a0ae82404e9c09576fc4469959, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/0d1ae463c63048efac0b9733e32d65f0, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/ec792505ad954d329bc1854c9b391c9b, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/35f322c4fd6d4733bf38e53bdca328c1] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp, totalSize=44.8 K 2024-11-23T13:21:12,038 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50528 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T13:21:12,038 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): 519df349e6147d27e7c8246089c4409f/A is initiating minor compaction (all files) 2024-11-23T13:21:12,038 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 519df349e6147d27e7c8246089c4409f/A in TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 
2024-11-23T13:21:12,038 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/f788875c844044b5a99e6c603cec027d, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/0aee31dfccc44711a0b0e8c5238b0951, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/91f31174a6e8419abe51341066780093, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/49b8beff60174ec0bdb28955ebdabb50] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp, totalSize=49.3 K 2024-11-23T13:21:12,038 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting a1ee73a0ae82404e9c09576fc4469959, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732368068592 2024-11-23T13:21:12,039 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting f788875c844044b5a99e6c603cec027d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732368068592 2024-11-23T13:21:12,040 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 0d1ae463c63048efac0b9733e32d65f0, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732368069235 2024-11-23T13:21:12,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 519df349e6147d27e7c8246089c4409f 2024-11-23T13:21:12,041 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0aee31dfccc44711a0b0e8c5238b0951, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732368069235 2024-11-23T13:21:12,041 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting ec792505ad954d329bc1854c9b391c9b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732368069642 2024-11-23T13:21:12,042 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 91f31174a6e8419abe51341066780093, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732368069642 2024-11-23T13:21:12,042 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 35f322c4fd6d4733bf38e53bdca328c1, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1732368070797 2024-11-23T13:21:12,042 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 519df349e6147d27e7c8246089c4409f 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-23T13:21:12,042 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=A 2024-11-23T13:21:12,043 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-11-23T13:21:12,043 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 49b8beff60174ec0bdb28955ebdabb50, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1732368070797 2024-11-23T13:21:12,043 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=B 2024-11-23T13:21:12,044 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:12,044 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=C 2024-11-23T13:21:12,044 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:12,067 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/5766feb26dd74f8081520e2ebbb750b8 is 50, key is test_row_0/A:col10/1732368071899/Put/seqid=0 2024-11-23T13:21:12,073 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 519df349e6147d27e7c8246089c4409f#B#compaction#25 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:12,074 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/75e68361ab684e0a8e33e55e5512653c is 50, key is test_row_0/B:col10/1732368070805/Put/seqid=0 2024-11-23T13:21:12,083 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 519df349e6147d27e7c8246089c4409f#A#compaction#26 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:12,084 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/da7786b4c2844541aaac75a865ef3e2a is 50, key is test_row_0/A:col10/1732368070805/Put/seqid=0 2024-11-23T13:21:12,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741863_1039 (size=14541) 2024-11-23T13:21:12,091 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:12,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368132087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:12,092 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/5766feb26dd74f8081520e2ebbb750b8 2024-11-23T13:21:12,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741864_1040 (size=12325) 2024-11-23T13:21:12,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741865_1041 (size=12325) 2024-11-23T13:21:12,111 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/9994fd6278a84a19ae512cce84d4b199 is 50, key is test_row_0/B:col10/1732368071899/Put/seqid=0 2024-11-23T13:21:12,120 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/75e68361ab684e0a8e33e55e5512653c as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/75e68361ab684e0a8e33e55e5512653c 2024-11-23T13:21:12,123 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/da7786b4c2844541aaac75a865ef3e2a as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/da7786b4c2844541aaac75a865ef3e2a 2024-11-23T13:21:12,133 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 519df349e6147d27e7c8246089c4409f/B of 519df349e6147d27e7c8246089c4409f into 75e68361ab684e0a8e33e55e5512653c(size=12.0 K), total size for store is 12.0 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:21:12,133 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:12,134 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f., storeName=519df349e6147d27e7c8246089c4409f/B, priority=12, startTime=1732368072032; duration=0sec 2024-11-23T13:21:12,134 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:21:12,134 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 519df349e6147d27e7c8246089c4409f:B 2024-11-23T13:21:12,134 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T13:21:12,137 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 45848 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T13:21:12,137 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 519df349e6147d27e7c8246089c4409f/C is initiating minor compaction (all files) 2024-11-23T13:21:12,137 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 519df349e6147d27e7c8246089c4409f/C in TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:12,137 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/2137edd728e441929612224219781ee7, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/5d8e936c13cd42f096dc805317619629, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/4302726451d3429fa7a60fcef7f12aad, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/bf39e173d63b43f9a8d320c01c1c2203] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp, totalSize=44.8 K 2024-11-23T13:21:12,138 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 2137edd728e441929612224219781ee7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732368068592 2024-11-23T13:21:12,138 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 519df349e6147d27e7c8246089c4409f/A of 519df349e6147d27e7c8246089c4409f into da7786b4c2844541aaac75a865ef3e2a(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T13:21:12,138 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:12,138 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f., storeName=519df349e6147d27e7c8246089c4409f/A, priority=12, startTime=1732368072032; duration=0sec 2024-11-23T13:21:12,138 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 5d8e936c13cd42f096dc805317619629, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1732368069235 2024-11-23T13:21:12,138 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:12,138 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 519df349e6147d27e7c8246089c4409f:A 2024-11-23T13:21:12,139 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 4302726451d3429fa7a60fcef7f12aad, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732368069642 2024-11-23T13:21:12,139 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting bf39e173d63b43f9a8d320c01c1c2203, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1732368070797 2024-11-23T13:21:12,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741866_1042 (size=12151) 2024-11-23T13:21:12,167 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 519df349e6147d27e7c8246089c4409f#C#compaction#28 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:12,168 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/fac0eee2b23d4742a59416b4307f8041 is 50, key is test_row_0/C:col10/1732368070805/Put/seqid=0 2024-11-23T13:21:12,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741867_1043 (size=12325) 2024-11-23T13:21:12,195 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:12,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368132194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:12,399 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:12,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368132399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:12,543 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/9994fd6278a84a19ae512cce84d4b199 2024-11-23T13:21:12,561 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/10effabfd0f04d698fd9af813eabf4da is 50, key is test_row_0/C:col10/1732368071899/Put/seqid=0 2024-11-23T13:21:12,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741868_1044 (size=12151) 2024-11-23T13:21:12,593 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/fac0eee2b23d4742a59416b4307f8041 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/fac0eee2b23d4742a59416b4307f8041 2024-11-23T13:21:12,599 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/10effabfd0f04d698fd9af813eabf4da 2024-11-23T13:21:12,609 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 519df349e6147d27e7c8246089c4409f/C of 519df349e6147d27e7c8246089c4409f into fac0eee2b23d4742a59416b4307f8041(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T13:21:12,609 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:12,609 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f., storeName=519df349e6147d27e7c8246089c4409f/C, priority=12, startTime=1732368072033; duration=0sec 2024-11-23T13:21:12,609 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:12,609 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 519df349e6147d27e7c8246089c4409f:C 2024-11-23T13:21:12,612 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/5766feb26dd74f8081520e2ebbb750b8 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/5766feb26dd74f8081520e2ebbb750b8 2024-11-23T13:21:12,625 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/5766feb26dd74f8081520e2ebbb750b8, entries=200, sequenceid=155, filesize=14.2 K 2024-11-23T13:21:12,627 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/9994fd6278a84a19ae512cce84d4b199 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/9994fd6278a84a19ae512cce84d4b199 2024-11-23T13:21:12,640 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/9994fd6278a84a19ae512cce84d4b199, entries=150, sequenceid=155, filesize=11.9 K 2024-11-23T13:21:12,644 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/10effabfd0f04d698fd9af813eabf4da as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/10effabfd0f04d698fd9af813eabf4da 2024-11-23T13:21:12,653 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/10effabfd0f04d698fd9af813eabf4da, entries=150, sequenceid=155, filesize=11.9 K 2024-11-23T13:21:12,655 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 519df349e6147d27e7c8246089c4409f in 613ms, sequenceid=155, 
compaction requested=false 2024-11-23T13:21:12,655 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:12,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 519df349e6147d27e7c8246089c4409f 2024-11-23T13:21:12,704 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 519df349e6147d27e7c8246089c4409f 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-23T13:21:12,704 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=A 2024-11-23T13:21:12,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:12,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=B 2024-11-23T13:21:12,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:12,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=C 2024-11-23T13:21:12,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:12,713 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/fdaf06915f854f41bfc28387eebe4575 is 50, key is test_row_0/A:col10/1732368072083/Put/seqid=0 2024-11-23T13:21:12,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741869_1045 (size=12151) 2024-11-23T13:21:12,743 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/fdaf06915f854f41bfc28387eebe4575 2024-11-23T13:21:12,761 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/be046e5f5f754c3bb500e04532cf6cf0 is 50, key is test_row_0/B:col10/1732368072083/Put/seqid=0 2024-11-23T13:21:12,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741870_1046 (size=12151) 2024-11-23T13:21:12,784 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/be046e5f5f754c3bb500e04532cf6cf0 2024-11-23T13:21:12,799 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/d30458de97fc4e228b7204b6470b18a0 is 50, key is test_row_0/C:col10/1732368072083/Put/seqid=0 2024-11-23T13:21:12,801 
WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:12,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368132799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:12,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741871_1047 (size=12151) 2024-11-23T13:21:12,904 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:12,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368132903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:12,946 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:12,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52702 deadline: 1732368132945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:12,946 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:12,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1732368132945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:12,949 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:12,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52652 deadline: 1732368132948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:12,951 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:12,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52684 deadline: 1732368132949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:13,108 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:13,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368133107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:13,221 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/d30458de97fc4e228b7204b6470b18a0 2024-11-23T13:21:13,233 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/fdaf06915f854f41bfc28387eebe4575 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/fdaf06915f854f41bfc28387eebe4575 2024-11-23T13:21:13,242 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/fdaf06915f854f41bfc28387eebe4575, entries=150, sequenceid=169, filesize=11.9 K 2024-11-23T13:21:13,244 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/be046e5f5f754c3bb500e04532cf6cf0 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/be046e5f5f754c3bb500e04532cf6cf0 2024-11-23T13:21:13,252 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/be046e5f5f754c3bb500e04532cf6cf0, entries=150, sequenceid=169, filesize=11.9 K 2024-11-23T13:21:13,254 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/d30458de97fc4e228b7204b6470b18a0 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/d30458de97fc4e228b7204b6470b18a0 2024-11-23T13:21:13,263 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/d30458de97fc4e228b7204b6470b18a0, entries=150, sequenceid=169, filesize=11.9 K 2024-11-23T13:21:13,266 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 519df349e6147d27e7c8246089c4409f in 561ms, sequenceid=169, compaction requested=true 2024-11-23T13:21:13,266 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:13,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 519df349e6147d27e7c8246089c4409f:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T13:21:13,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:13,267 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:21:13,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 519df349e6147d27e7c8246089c4409f:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T13:21:13,267 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:21:13,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:13,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 519df349e6147d27e7c8246089c4409f:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T13:21:13,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:21:13,269 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36627 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:21:13,269 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 519df349e6147d27e7c8246089c4409f/B is initiating minor compaction (all files) 2024-11-23T13:21:13,269 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 519df349e6147d27e7c8246089c4409f/B in TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 
2024-11-23T13:21:13,269 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/75e68361ab684e0a8e33e55e5512653c, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/9994fd6278a84a19ae512cce84d4b199, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/be046e5f5f754c3bb500e04532cf6cf0] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp, totalSize=35.8 K 2024-11-23T13:21:13,270 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39017 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:21:13,270 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): 519df349e6147d27e7c8246089c4409f/A is initiating minor compaction (all files) 2024-11-23T13:21:13,270 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 519df349e6147d27e7c8246089c4409f/A in TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:13,270 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/da7786b4c2844541aaac75a865ef3e2a, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/5766feb26dd74f8081520e2ebbb750b8, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/fdaf06915f854f41bfc28387eebe4575] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp, totalSize=38.1 K 2024-11-23T13:21:13,270 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 75e68361ab684e0a8e33e55e5512653c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1732368070797 2024-11-23T13:21:13,271 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting da7786b4c2844541aaac75a865ef3e2a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1732368070797 2024-11-23T13:21:13,271 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 9994fd6278a84a19ae512cce84d4b199, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1732368071899 2024-11-23T13:21:13,272 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5766feb26dd74f8081520e2ebbb750b8, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1732368071899 2024-11-23T13:21:13,272 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] 
compactions.Compactor(224): Compacting be046e5f5f754c3bb500e04532cf6cf0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732368072075 2024-11-23T13:21:13,273 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting fdaf06915f854f41bfc28387eebe4575, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732368072075 2024-11-23T13:21:13,288 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 519df349e6147d27e7c8246089c4409f#A#compaction#33 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:13,289 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/dae7886733c54fbc839e506bfa54b797 is 50, key is test_row_0/A:col10/1732368072083/Put/seqid=0 2024-11-23T13:21:13,292 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 519df349e6147d27e7c8246089c4409f#B#compaction#34 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:13,293 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/7ebc1483365a46ffb7c9b846b269095a is 50, key is test_row_0/B:col10/1732368072083/Put/seqid=0 2024-11-23T13:21:13,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741872_1048 (size=12527) 2024-11-23T13:21:13,322 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/dae7886733c54fbc839e506bfa54b797 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/dae7886733c54fbc839e506bfa54b797 2024-11-23T13:21:13,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741873_1049 (size=12527) 2024-11-23T13:21:13,331 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 519df349e6147d27e7c8246089c4409f/A of 519df349e6147d27e7c8246089c4409f into dae7886733c54fbc839e506bfa54b797(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T13:21:13,331 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:13,331 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f., storeName=519df349e6147d27e7c8246089c4409f/A, priority=13, startTime=1732368073267; duration=0sec 2024-11-23T13:21:13,332 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:21:13,332 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 519df349e6147d27e7c8246089c4409f:A 2024-11-23T13:21:13,332 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:21:13,333 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36627 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:21:13,333 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): 519df349e6147d27e7c8246089c4409f/C is initiating minor compaction (all files) 2024-11-23T13:21:13,334 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 519df349e6147d27e7c8246089c4409f/C in TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:13,334 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/fac0eee2b23d4742a59416b4307f8041, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/10effabfd0f04d698fd9af813eabf4da, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/d30458de97fc4e228b7204b6470b18a0] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp, totalSize=35.8 K 2024-11-23T13:21:13,334 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting fac0eee2b23d4742a59416b4307f8041, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1732368070797 2024-11-23T13:21:13,335 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 10effabfd0f04d698fd9af813eabf4da, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1732368071899 2024-11-23T13:21:13,335 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting d30458de97fc4e228b7204b6470b18a0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732368072075 2024-11-23T13:21:13,347 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 519df349e6147d27e7c8246089c4409f#C#compaction#35 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:13,348 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/9daeeeec0b3b4cc3bb35cf03cb693a43 is 50, key is test_row_0/C:col10/1732368072083/Put/seqid=0 2024-11-23T13:21:13,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741874_1050 (size=12527) 2024-11-23T13:21:13,377 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/9daeeeec0b3b4cc3bb35cf03cb693a43 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/9daeeeec0b3b4cc3bb35cf03cb693a43 2024-11-23T13:21:13,387 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 519df349e6147d27e7c8246089c4409f/C of 519df349e6147d27e7c8246089c4409f into 9daeeeec0b3b4cc3bb35cf03cb693a43(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:21:13,387 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:13,387 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f., storeName=519df349e6147d27e7c8246089c4409f/C, priority=13, startTime=1732368073267; duration=0sec 2024-11-23T13:21:13,387 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:13,387 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 519df349e6147d27e7c8246089c4409f:C 2024-11-23T13:21:13,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 519df349e6147d27e7c8246089c4409f 2024-11-23T13:21:13,415 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 519df349e6147d27e7c8246089c4409f 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-23T13:21:13,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=A 2024-11-23T13:21:13,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:13,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=B 2024-11-23T13:21:13,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:13,415 DEBUG [MemStoreFlusher.0 
{}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=C 2024-11-23T13:21:13,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:13,423 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/91bcedab50d9418aa521d1e4cf8b79cc is 50, key is test_row_0/A:col10/1732368073412/Put/seqid=0 2024-11-23T13:21:13,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741875_1051 (size=14541) 2024-11-23T13:21:13,459 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:13,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368133459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:13,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-23T13:21:13,532 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 14 completed 2024-11-23T13:21:13,534 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T13:21:13,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees 2024-11-23T13:21:13,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-23T13:21:13,537 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T13:21:13,538 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T13:21:13,538 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T13:21:13,563 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:13,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368133562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:13,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-23T13:21:13,690 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:13,691 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-23T13:21:13,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 
2024-11-23T13:21:13,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. as already flushing 2024-11-23T13:21:13,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:13,692 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:13,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:21:13,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:13,736 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/7ebc1483365a46ffb7c9b846b269095a as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/7ebc1483365a46ffb7c9b846b269095a 2024-11-23T13:21:13,746 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 519df349e6147d27e7c8246089c4409f/B of 519df349e6147d27e7c8246089c4409f into 7ebc1483365a46ffb7c9b846b269095a(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
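[Editor's sketch] The HStore line above reports a background compaction: three store files in column family B were rewritten into a single 12.2 K file by the region server's long-compactions thread. The same operation can also be requested explicitly through the Admin API; a minimal sketch (connection and configuration setup are assumed, they are not shown in this log):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CompactTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("TestAcidGuarantees");
          admin.compact(tn, Bytes.toBytes("B"));   // queue a minor compaction for family B
          admin.majorCompact(tn);                  // or rewrite all store files in every family
        }
      }
    }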
2024-11-23T13:21:13,746 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:13,746 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f., storeName=519df349e6147d27e7c8246089c4409f/B, priority=13, startTime=1732368073267; duration=0sec 2024-11-23T13:21:13,746 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:13,747 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 519df349e6147d27e7c8246089c4409f:B 2024-11-23T13:21:13,766 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:13,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368133766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:13,847 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:13,848 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-23T13:21:13,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 
2024-11-23T13:21:13,848 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/91bcedab50d9418aa521d1e4cf8b79cc 2024-11-23T13:21:13,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. as already flushing 2024-11-23T13:21:13,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-23T13:21:13,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:13,849 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:13,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
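[Editor's sketch] The "Flushed memstore data size=51.44 KB ... (bloomFilter=true)" line above indicates the store files are written with bloom filters enabled for the test table's families A, B, and C. The actual schema used by TestAcidGuarantees is not shown in this excerpt; the sketch below only illustrates how such a table could be declared, with the family names and row-level bloom filter inferred from the flush lines:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder tdb =
              TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
          for (String family : new String[] {"A", "B", "C"}) {
            tdb.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                .setBloomFilterType(BloomType.ROW)   // per-row bloom filter, matching "bloomFilter=true" above
                .build());
          }
          admin.createTable(tdb.build());
        }
      }
    }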
2024-11-23T13:21:13,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:21:13,861 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/e82fcb68879c436eb1f53d72f88863dd is 50, key is test_row_0/B:col10/1732368073412/Put/seqid=0 2024-11-23T13:21:13,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741876_1052 (size=12151) 2024-11-23T13:21:14,003 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:14,004 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-23T13:21:14,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:14,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. as already flushing 2024-11-23T13:21:14,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:14,004 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:14,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:14,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:14,070 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:14,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368134068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:14,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-23T13:21:14,157 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:14,158 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-23T13:21:14,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:14,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. as already flushing 2024-11-23T13:21:14,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:14,158 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:14,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:14,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:14,275 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/e82fcb68879c436eb1f53d72f88863dd 2024-11-23T13:21:14,292 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/0899b126280c43de96ed6f0818bd7535 is 50, key is test_row_0/C:col10/1732368073412/Put/seqid=0 2024-11-23T13:21:14,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741877_1053 (size=12151) 2024-11-23T13:21:14,319 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:14,319 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-23T13:21:14,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:14,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 
as already flushing 2024-11-23T13:21:14,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:14,320 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:14,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:14,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:14,473 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:14,474 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-23T13:21:14,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:14,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. as already flushing 2024-11-23T13:21:14,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:14,474 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:14,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:14,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:14,572 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:14,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368134572, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:14,627 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:14,628 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-23T13:21:14,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:14,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. as already flushing 2024-11-23T13:21:14,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:14,628 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
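[Editor's sketch] The repeated 512.0 K blocking limit in the RegionTooBusyException entries is the region's memstore flush size multiplied by the blocking multiplier; with the default multiplier of 4, that corresponds to a 128 KB flush size, which this test presumably lowers from the 128 MB default to force frequent flushes. The concrete values below are assumptions inferred from the 512 K limit, not read from the test's configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimits {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed values: a 128 KB flush size with the default multiplier of 4 yields the
        // 512 K blocking limit ("Over memstore limit=512.0 K") seen in the log above.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("Writes are rejected once a region's memstore exceeds " + blockingLimit + " bytes");
      }
    }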
2024-11-23T13:21:14,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:14,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:14,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-23T13:21:14,705 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/0899b126280c43de96ed6f0818bd7535 2024-11-23T13:21:14,714 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/91bcedab50d9418aa521d1e4cf8b79cc as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/91bcedab50d9418aa521d1e4cf8b79cc 2024-11-23T13:21:14,723 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/91bcedab50d9418aa521d1e4cf8b79cc, entries=200, sequenceid=197, filesize=14.2 K 2024-11-23T13:21:14,725 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/e82fcb68879c436eb1f53d72f88863dd as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/e82fcb68879c436eb1f53d72f88863dd 2024-11-23T13:21:14,733 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/e82fcb68879c436eb1f53d72f88863dd, entries=150, sequenceid=197, filesize=11.9 K 2024-11-23T13:21:14,735 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/0899b126280c43de96ed6f0818bd7535 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/0899b126280c43de96ed6f0818bd7535 2024-11-23T13:21:14,747 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/0899b126280c43de96ed6f0818bd7535, entries=150, sequenceid=197, filesize=11.9 K 2024-11-23T13:21:14,749 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 519df349e6147d27e7c8246089c4409f in 1335ms, sequenceid=197, compaction requested=false 2024-11-23T13:21:14,749 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:14,782 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:14,783 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-23T13:21:14,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:14,783 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2837): Flushing 519df349e6147d27e7c8246089c4409f 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-23T13:21:14,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=A 2024-11-23T13:21:14,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:14,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=B 2024-11-23T13:21:14,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:14,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=C 2024-11-23T13:21:14,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:14,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/0a947b38cb884dec8fc973004f5bf624 is 50, key is test_row_0/A:col10/1732368073454/Put/seqid=0 2024-11-23T13:21:14,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741878_1054 (size=12151) 2024-11-23T13:21:14,809 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=208 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/0a947b38cb884dec8fc973004f5bf624 2024-11-23T13:21:14,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/bf5593c0ae3f4d1aaf5b5b7b88d4869c is 50, key is test_row_0/B:col10/1732368073454/Put/seqid=0 2024-11-23T13:21:14,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741879_1055 (size=12151) 2024-11-23T13:21:14,839 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=208 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/bf5593c0ae3f4d1aaf5b5b7b88d4869c 2024-11-23T13:21:14,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/27d06dc86d2e4de7b98424b1f124d61e is 50, key is test_row_0/C:col10/1732368073454/Put/seqid=0 2024-11-23T13:21:14,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741880_1056 (size=12151) 2024-11-23T13:21:14,877 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=208 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/27d06dc86d2e4de7b98424b1f124d61e 2024-11-23T13:21:14,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/0a947b38cb884dec8fc973004f5bf624 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/0a947b38cb884dec8fc973004f5bf624 2024-11-23T13:21:14,895 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/0a947b38cb884dec8fc973004f5bf624, entries=150, sequenceid=208, filesize=11.9 K 2024-11-23T13:21:14,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/bf5593c0ae3f4d1aaf5b5b7b88d4869c as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/bf5593c0ae3f4d1aaf5b5b7b88d4869c 2024-11-23T13:21:14,906 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/bf5593c0ae3f4d1aaf5b5b7b88d4869c, entries=150, sequenceid=208, filesize=11.9 K 2024-11-23T13:21:14,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/27d06dc86d2e4de7b98424b1f124d61e as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/27d06dc86d2e4de7b98424b1f124d61e 2024-11-23T13:21:14,921 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/27d06dc86d2e4de7b98424b1f124d61e, entries=150, sequenceid=208, filesize=11.9 K 2024-11-23T13:21:14,923 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=0 B/0 for 519df349e6147d27e7c8246089c4409f in 140ms, sequenceid=208, compaction requested=true 2024-11-23T13:21:14,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2538): Flush status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:14,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 
2024-11-23T13:21:14,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-11-23T13:21:14,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4106): Remote procedure done, pid=17 2024-11-23T13:21:14,928 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16 2024-11-23T13:21:14,928 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3870 sec 2024-11-23T13:21:14,931 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees in 1.3950 sec 2024-11-23T13:21:15,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 519df349e6147d27e7c8246089c4409f 2024-11-23T13:21:15,000 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 519df349e6147d27e7c8246089c4409f 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-23T13:21:15,003 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=A 2024-11-23T13:21:15,003 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:15,003 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=B 2024-11-23T13:21:15,003 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:15,003 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=C 2024-11-23T13:21:15,003 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:15,009 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/fb994febe70444078dc91bd26647cb52 is 50, key is test_row_0/A:col10/1732368074997/Put/seqid=0 2024-11-23T13:21:15,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741881_1057 (size=12147) 2024-11-23T13:21:15,021 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=221 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/fb994febe70444078dc91bd26647cb52 2024-11-23T13:21:15,039 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/9260a2735a8443abb84c541bbca544b3 is 50, key is test_row_0/B:col10/1732368074997/Put/seqid=0 2024-11-23T13:21:15,040 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size 
limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:15,040 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:15,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1732368135036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:15,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52684 deadline: 1732368135034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:15,041 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:15,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52702 deadline: 1732368135038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:15,045 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:15,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52652 deadline: 1732368135041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:15,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741882_1058 (size=9757) 2024-11-23T13:21:15,049 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=221 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/9260a2735a8443abb84c541bbca544b3 2024-11-23T13:21:15,061 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/2807abe5f9754699a49e47fa95672b3c is 50, key is test_row_0/C:col10/1732368074997/Put/seqid=0 2024-11-23T13:21:15,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741883_1059 (size=9757) 2024-11-23T13:21:15,073 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=221 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/2807abe5f9754699a49e47fa95672b3c 2024-11-23T13:21:15,082 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/fb994febe70444078dc91bd26647cb52 as 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/fb994febe70444078dc91bd26647cb52 2024-11-23T13:21:15,091 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/fb994febe70444078dc91bd26647cb52, entries=150, sequenceid=221, filesize=11.9 K 2024-11-23T13:21:15,093 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/9260a2735a8443abb84c541bbca544b3 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/9260a2735a8443abb84c541bbca544b3 2024-11-23T13:21:15,099 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/9260a2735a8443abb84c541bbca544b3, entries=100, sequenceid=221, filesize=9.5 K 2024-11-23T13:21:15,101 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/2807abe5f9754699a49e47fa95672b3c as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/2807abe5f9754699a49e47fa95672b3c 2024-11-23T13:21:15,112 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/2807abe5f9754699a49e47fa95672b3c, entries=100, sequenceid=221, filesize=9.5 K 2024-11-23T13:21:15,113 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 519df349e6147d27e7c8246089c4409f in 113ms, sequenceid=221, compaction requested=true 2024-11-23T13:21:15,113 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:15,113 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 519df349e6147d27e7c8246089c4409f:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T13:21:15,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:15,114 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T13:21:15,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 519df349e6147d27e7c8246089c4409f:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T13:21:15,114 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 
16 blocking 2024-11-23T13:21:15,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:15,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 519df349e6147d27e7c8246089c4409f:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T13:21:15,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:21:15,116 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 46586 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T13:21:15,116 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 519df349e6147d27e7c8246089c4409f/B is initiating minor compaction (all files) 2024-11-23T13:21:15,116 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 519df349e6147d27e7c8246089c4409f/B in TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:15,116 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/7ebc1483365a46ffb7c9b846b269095a, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/e82fcb68879c436eb1f53d72f88863dd, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/bf5593c0ae3f4d1aaf5b5b7b88d4869c, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/9260a2735a8443abb84c541bbca544b3] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp, totalSize=45.5 K 2024-11-23T13:21:15,116 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 51366 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T13:21:15,117 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): 519df349e6147d27e7c8246089c4409f/A is initiating minor compaction (all files) 2024-11-23T13:21:15,117 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 519df349e6147d27e7c8246089c4409f/A in TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 
2024-11-23T13:21:15,117 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/dae7886733c54fbc839e506bfa54b797, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/91bcedab50d9418aa521d1e4cf8b79cc, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/0a947b38cb884dec8fc973004f5bf624, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/fb994febe70444078dc91bd26647cb52] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp, totalSize=50.2 K 2024-11-23T13:21:15,117 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 7ebc1483365a46ffb7c9b846b269095a, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732368072075 2024-11-23T13:21:15,118 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting dae7886733c54fbc839e506bfa54b797, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732368072075 2024-11-23T13:21:15,118 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting e82fcb68879c436eb1f53d72f88863dd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1732368072767 2024-11-23T13:21:15,118 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 91bcedab50d9418aa521d1e4cf8b79cc, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1732368072767 2024-11-23T13:21:15,119 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting bf5593c0ae3f4d1aaf5b5b7b88d4869c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=208, earliestPutTs=1732368073419 2024-11-23T13:21:15,119 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0a947b38cb884dec8fc973004f5bf624, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=208, earliestPutTs=1732368073419 2024-11-23T13:21:15,120 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 9260a2735a8443abb84c541bbca544b3, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1732368074997 2024-11-23T13:21:15,122 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting fb994febe70444078dc91bd26647cb52, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1732368074995 2024-11-23T13:21:15,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 519df349e6147d27e7c8246089c4409f 2024-11-23T13:21:15,148 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 519df349e6147d27e7c8246089c4409f 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-23T13:21:15,150 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=A 2024-11-23T13:21:15,151 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:15,151 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=B 2024-11-23T13:21:15,151 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:15,151 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=C 2024-11-23T13:21:15,151 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:15,154 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 519df349e6147d27e7c8246089c4409f#B#compaction#45 average throughput is 1.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:15,156 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/a5571bd2c3da4c1eae009727373e08c8 is 50, key is test_row_0/B:col10/1732368074997/Put/seqid=0 2024-11-23T13:21:15,160 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 519df349e6147d27e7c8246089c4409f#A#compaction#46 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:15,162 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/e8de093a701e4a4b83dd0397f1302d8a is 50, key is test_row_0/A:col10/1732368074997/Put/seqid=0 2024-11-23T13:21:15,165 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:15,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52684 deadline: 1732368135162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:15,166 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:15,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52652 deadline: 1732368135162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:15,166 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/fa92163a33a3429d82c1d610a9a39588 is 50, key is test_row_0/A:col10/1732368075146/Put/seqid=0 2024-11-23T13:21:15,169 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:15,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1732368135165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:15,170 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:15,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52702 deadline: 1732368135165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:15,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741884_1060 (size=12663) 2024-11-23T13:21:15,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741886_1062 (size=12151) 2024-11-23T13:21:15,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741885_1061 (size=12663) 2024-11-23T13:21:15,268 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:15,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52684 deadline: 1732368135267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:15,269 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:15,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52652 deadline: 1732368135267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:15,272 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:15,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1732368135271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:15,272 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:15,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52702 deadline: 1732368135272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:15,473 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:15,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52652 deadline: 1732368135470, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:15,474 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:15,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52684 deadline: 1732368135471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:15,477 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:15,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1732368135474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:15,477 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:15,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52702 deadline: 1732368135475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:15,581 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:15,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368135581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:15,610 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/fa92163a33a3429d82c1d610a9a39588 2024-11-23T13:21:15,612 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/a5571bd2c3da4c1eae009727373e08c8 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/a5571bd2c3da4c1eae009727373e08c8 2024-11-23T13:21:15,623 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/e8de093a701e4a4b83dd0397f1302d8a as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/e8de093a701e4a4b83dd0397f1302d8a 2024-11-23T13:21:15,624 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/5c8041f59af94feeb22d5f5123a7fdf4 is 50, key is test_row_0/B:col10/1732368075146/Put/seqid=0 2024-11-23T13:21:15,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741887_1063 (size=12151) 2024-11-23T13:21:15,639 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/5c8041f59af94feeb22d5f5123a7fdf4 2024-11-23T13:21:15,640 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 519df349e6147d27e7c8246089c4409f/B of 519df349e6147d27e7c8246089c4409f into a5571bd2c3da4c1eae009727373e08c8(size=12.4 K), total size for store is 12.4 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:21:15,640 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 519df349e6147d27e7c8246089c4409f/A of 519df349e6147d27e7c8246089c4409f into e8de093a701e4a4b83dd0397f1302d8a(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:21:15,640 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:15,640 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:15,640 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f., storeName=519df349e6147d27e7c8246089c4409f/A, priority=12, startTime=1732368075113; duration=0sec 2024-11-23T13:21:15,640 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:21:15,640 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 519df349e6147d27e7c8246089c4409f:A 2024-11-23T13:21:15,640 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T13:21:15,640 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f., storeName=519df349e6147d27e7c8246089c4409f/B, priority=12, startTime=1732368075114; duration=0sec 2024-11-23T13:21:15,640 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:15,640 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 519df349e6147d27e7c8246089c4409f:B 2024-11-23T13:21:15,643 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 46586 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T13:21:15,644 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): 519df349e6147d27e7c8246089c4409f/C is initiating minor compaction (all files) 2024-11-23T13:21:15,644 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 519df349e6147d27e7c8246089c4409f/C in TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 
2024-11-23T13:21:15,644 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/9daeeeec0b3b4cc3bb35cf03cb693a43, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/0899b126280c43de96ed6f0818bd7535, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/27d06dc86d2e4de7b98424b1f124d61e, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/2807abe5f9754699a49e47fa95672b3c] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp, totalSize=45.5 K 2024-11-23T13:21:15,644 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9daeeeec0b3b4cc3bb35cf03cb693a43, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732368072075 2024-11-23T13:21:15,645 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0899b126280c43de96ed6f0818bd7535, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1732368072767 2024-11-23T13:21:15,645 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 27d06dc86d2e4de7b98424b1f124d61e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=208, earliestPutTs=1732368073419 2024-11-23T13:21:15,646 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2807abe5f9754699a49e47fa95672b3c, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1732368074997 2024-11-23T13:21:15,650 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/2c489f274926488cb0c6767106ffbe9d is 50, key is test_row_0/C:col10/1732368075146/Put/seqid=0 2024-11-23T13:21:15,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-23T13:21:15,653 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 16 completed 2024-11-23T13:21:15,655 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T13:21:15,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees 2024-11-23T13:21:15,658 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T13:21:15,659 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-23T13:21:15,660 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T13:21:15,660 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T13:21:15,679 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 519df349e6147d27e7c8246089c4409f#C#compaction#50 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:15,680 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/68fb036f65b54fb88f69771a561b045d is 50, key is test_row_0/C:col10/1732368074997/Put/seqid=0 2024-11-23T13:21:15,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741888_1064 (size=12151) 2024-11-23T13:21:15,692 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/2c489f274926488cb0c6767106ffbe9d 2024-11-23T13:21:15,700 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/fa92163a33a3429d82c1d610a9a39588 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/fa92163a33a3429d82c1d610a9a39588 2024-11-23T13:21:15,707 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/fa92163a33a3429d82c1d610a9a39588, entries=150, sequenceid=247, filesize=11.9 K 2024-11-23T13:21:15,708 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/5c8041f59af94feeb22d5f5123a7fdf4 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/5c8041f59af94feeb22d5f5123a7fdf4 2024-11-23T13:21:15,716 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/5c8041f59af94feeb22d5f5123a7fdf4, entries=150, sequenceid=247, filesize=11.9 K 2024-11-23T13:21:15,718 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/2c489f274926488cb0c6767106ffbe9d as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/2c489f274926488cb0c6767106ffbe9d 2024-11-23T13:21:15,724 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/2c489f274926488cb0c6767106ffbe9d, entries=150, sequenceid=247, filesize=11.9 K 2024-11-23T13:21:15,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741889_1065 (size=12663) 2024-11-23T13:21:15,729 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for 519df349e6147d27e7c8246089c4409f in 581ms, sequenceid=247, compaction requested=false 2024-11-23T13:21:15,729 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:15,739 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/68fb036f65b54fb88f69771a561b045d as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/68fb036f65b54fb88f69771a561b045d 2024-11-23T13:21:15,748 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 519df349e6147d27e7c8246089c4409f/C of 519df349e6147d27e7c8246089c4409f into 68fb036f65b54fb88f69771a561b045d(size=12.4 K), total size for store is 24.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
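The writers in this test keep issuing single-cell Puts against TestAcidGuarantees while the region is flushing, so some mutations are bounced with RegionTooBusyException until the memstore drains. A hedged client-side sketch of that pattern; whether the exception reaches the caller directly or wrapped by the client's own retry machinery depends on client settings, so the catch below checks both:

    import java.io.IOException;

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionWriter {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          for (int attempt = 1; attempt <= 10; attempt++) {
            try {
              table.put(put);   // may be rejected while the region memstore is over its limit
              break;
            } catch (IOException e) {
              boolean busy = e instanceof RegionTooBusyException
                  || e.getCause() instanceof RegionTooBusyException;
              if (!busy) {
                throw e;
              }
              // Back off and let MemStoreFlusher.0 drain the region before retrying.
              Thread.sleep(100L * attempt);
            }
          }
        }
      }
    }
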
2024-11-23T13:21:15,749 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:15,749 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f., storeName=519df349e6147d27e7c8246089c4409f/C, priority=12, startTime=1732368075114; duration=0sec 2024-11-23T13:21:15,749 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:15,749 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 519df349e6147d27e7c8246089c4409f:C 2024-11-23T13:21:15,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-23T13:21:15,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 519df349e6147d27e7c8246089c4409f 2024-11-23T13:21:15,779 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 519df349e6147d27e7c8246089c4409f 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-23T13:21:15,779 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=A 2024-11-23T13:21:15,780 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:15,781 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=B 2024-11-23T13:21:15,781 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:15,781 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=C 2024-11-23T13:21:15,781 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:15,788 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/195ebe2045d04600bd8d2ad7fb86776e is 50, key is test_row_0/A:col10/1732368075162/Put/seqid=0 2024-11-23T13:21:15,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741890_1066 (size=12251) 2024-11-23T13:21:15,804 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=262 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/195ebe2045d04600bd8d2ad7fb86776e 2024-11-23T13:21:15,812 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:15,813 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=19 2024-11-23T13:21:15,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:15,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. as already flushing 2024-11-23T13:21:15,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:15,814 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:15,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
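The FLUSH operations driven from Thread-159 above go through the Admin API: the master stores a FlushTableProcedure, fans out FlushRegionProcedure subtasks such as pid=19, and re-dispatches them when the region server answers "already flushing" as it just did. A minimal sketch of issuing such a flush, assuming a reachable cluster and the table name from this log:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTestTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Submits a FlushTableProcedure on the master (pid=16/18 above), which fans out
          // per-region FlushRegionProcedure subtasks like pid=19; the call returns once the
          // procedure reports completion, as the HBaseAdmin$TableFuture line above suggests.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }
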
2024-11-23T13:21:15,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:15,818 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/3d68828cffb24e6f8f37958efa03bcfd is 50, key is test_row_0/B:col10/1732368075162/Put/seqid=0 2024-11-23T13:21:15,821 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:15,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52652 deadline: 1732368135815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:15,822 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:15,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1732368135816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:15,823 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:15,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52702 deadline: 1732368135817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:15,823 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:15,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52684 deadline: 1732368135822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:15,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741891_1067 (size=12251) 2024-11-23T13:21:15,837 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=262 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/3d68828cffb24e6f8f37958efa03bcfd 2024-11-23T13:21:15,852 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/7487fe022d52497789c826a798c87ccf is 50, key is test_row_0/C:col10/1732368075162/Put/seqid=0 2024-11-23T13:21:15,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741892_1068 (size=12251) 2024-11-23T13:21:15,873 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=262 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/7487fe022d52497789c826a798c87ccf 2024-11-23T13:21:15,882 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/195ebe2045d04600bd8d2ad7fb86776e as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/195ebe2045d04600bd8d2ad7fb86776e 2024-11-23T13:21:15,889 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/195ebe2045d04600bd8d2ad7fb86776e, entries=150, sequenceid=262, filesize=12.0 K 2024-11-23T13:21:15,891 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/3d68828cffb24e6f8f37958efa03bcfd as 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/3d68828cffb24e6f8f37958efa03bcfd 2024-11-23T13:21:15,899 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/3d68828cffb24e6f8f37958efa03bcfd, entries=150, sequenceid=262, filesize=12.0 K 2024-11-23T13:21:15,901 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/7487fe022d52497789c826a798c87ccf as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/7487fe022d52497789c826a798c87ccf 2024-11-23T13:21:15,909 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/7487fe022d52497789c826a798c87ccf, entries=150, sequenceid=262, filesize=12.0 K 2024-11-23T13:21:15,911 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 519df349e6147d27e7c8246089c4409f in 132ms, sequenceid=262, compaction requested=true 2024-11-23T13:21:15,912 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:15,912 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:21:15,912 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 519df349e6147d27e7c8246089c4409f:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T13:21:15,912 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:15,913 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:21:15,914 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37065 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:21:15,914 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 519df349e6147d27e7c8246089c4409f:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T13:21:15,914 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:15,914 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 519df349e6147d27e7c8246089c4409f/A is initiating minor compaction (all files) 2024-11-23T13:21:15,914 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
519df349e6147d27e7c8246089c4409f:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T13:21:15,914 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:21:15,914 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 519df349e6147d27e7c8246089c4409f/A in TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:15,914 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/e8de093a701e4a4b83dd0397f1302d8a, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/fa92163a33a3429d82c1d610a9a39588, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/195ebe2045d04600bd8d2ad7fb86776e] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp, totalSize=36.2 K 2024-11-23T13:21:15,915 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting e8de093a701e4a4b83dd0397f1302d8a, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1732368073457 2024-11-23T13:21:15,915 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting fa92163a33a3429d82c1d610a9a39588, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1732368075027 2024-11-23T13:21:15,916 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37065 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:21:15,916 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): 519df349e6147d27e7c8246089c4409f/B is initiating minor compaction (all files) 2024-11-23T13:21:15,916 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 519df349e6147d27e7c8246089c4409f/B in TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 
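The compaction selections above ("3 files of size 37065 ... 1 permutations", "4 eligible, 16 blocking") are produced by ExploringCompactionPolicy under the store's standard selection knobs. A sketch of those configuration keys with illustrative values; only the blocking-store-files figure (16) is confirmed by the log itself:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionSelectionConfig {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Knobs consulted when selecting store files for a minor compaction; the first
        // three values are illustrative, not taken from this test run.
        conf.setInt("hbase.hstore.compaction.min", 3);        // minimum eligible files to compact
        conf.setInt("hbase.hstore.compaction.max", 10);       // never compact more than this many at once
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f); // size ratio used while exploring permutations
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);   // matches the "16 blocking" figure logged above
        System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1));
      }
    }
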
2024-11-23T13:21:15,916 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/a5571bd2c3da4c1eae009727373e08c8, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/5c8041f59af94feeb22d5f5123a7fdf4, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/3d68828cffb24e6f8f37958efa03bcfd] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp, totalSize=36.2 K 2024-11-23T13:21:15,917 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 195ebe2045d04600bd8d2ad7fb86776e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1732368075162 2024-11-23T13:21:15,917 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting a5571bd2c3da4c1eae009727373e08c8, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1732368073457 2024-11-23T13:21:15,918 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5c8041f59af94feeb22d5f5123a7fdf4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1732368075027 2024-11-23T13:21:15,919 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3d68828cffb24e6f8f37958efa03bcfd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1732368075162 2024-11-23T13:21:15,931 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 519df349e6147d27e7c8246089c4409f 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-23T13:21:15,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 519df349e6147d27e7c8246089c4409f 2024-11-23T13:21:15,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=A 2024-11-23T13:21:15,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:15,932 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=B 2024-11-23T13:21:15,932 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:15,932 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=C 2024-11-23T13:21:15,932 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:15,940 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 519df349e6147d27e7c8246089c4409f#B#compaction#55 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:15,940 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/0cb4da06a623471fb3be936bcc6d35d3 is 50, key is test_row_0/A:col10/1732368075811/Put/seqid=0 2024-11-23T13:21:15,941 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/9c20c904a3504caa8aa7360157d57370 is 50, key is test_row_0/B:col10/1732368075162/Put/seqid=0 2024-11-23T13:21:15,942 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 519df349e6147d27e7c8246089c4409f#A#compaction#54 average throughput is 1.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:15,942 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/a298c9d1130c4112976ef3cfd792a52d is 50, key is test_row_0/A:col10/1732368075162/Put/seqid=0 2024-11-23T13:21:15,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741893_1069 (size=12301) 2024-11-23T13:21:15,955 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:15,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1732368135950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:15,956 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:15,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52684 deadline: 1732368135950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:15,957 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:15,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52702 deadline: 1732368135952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:15,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741894_1070 (size=12865) 2024-11-23T13:21:15,958 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:15,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52652 deadline: 1732368135955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:15,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741895_1071 (size=12865) 2024-11-23T13:21:15,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-23T13:21:15,966 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:15,967 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-23T13:21:15,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:15,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. as already flushing 2024-11-23T13:21:15,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:15,968 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:15,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:15,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:15,976 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/a298c9d1130c4112976ef3cfd792a52d as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/a298c9d1130c4112976ef3cfd792a52d 2024-11-23T13:21:15,985 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 519df349e6147d27e7c8246089c4409f/A of 519df349e6147d27e7c8246089c4409f into a298c9d1130c4112976ef3cfd792a52d(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:21:15,985 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:15,986 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f., storeName=519df349e6147d27e7c8246089c4409f/A, priority=13, startTime=1732368075912; duration=0sec 2024-11-23T13:21:15,986 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:21:15,986 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 519df349e6147d27e7c8246089c4409f:A 2024-11-23T13:21:15,986 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:21:15,988 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37065 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:21:15,988 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 519df349e6147d27e7c8246089c4409f/C is initiating minor compaction (all files) 2024-11-23T13:21:15,988 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 519df349e6147d27e7c8246089c4409f/C in TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 
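The PressureAwareThroughputController lines report each compaction's average write rate against a "total limit" of 50.00 MB/second, which looks like the controller's lower throughput bound when no flush pressure is present (an inference; the log does not state the configuration). A sketch of the related keys, using what are believed to be the usual defaults:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThroughputConfig {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Bounds between which the pressure-aware controller scales the allowed compaction
        // write rate; 50 MB/s matches the "total limit" reported above when pressure is zero.
        // These are assumed defaults, set explicitly here only for illustration.
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        System.out.println("lower bound = "
            + conf.getLong("hbase.hstore.compaction.throughput.lower.bound", -1) + " bytes/sec");
      }
    }
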
2024-11-23T13:21:15,988 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/68fb036f65b54fb88f69771a561b045d, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/2c489f274926488cb0c6767106ffbe9d, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/7487fe022d52497789c826a798c87ccf] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp, totalSize=36.2 K 2024-11-23T13:21:15,989 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 68fb036f65b54fb88f69771a561b045d, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1732368073457 2024-11-23T13:21:15,989 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 2c489f274926488cb0c6767106ffbe9d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1732368075027 2024-11-23T13:21:15,990 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 7487fe022d52497789c826a798c87ccf, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1732368075162 2024-11-23T13:21:16,003 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 519df349e6147d27e7c8246089c4409f#C#compaction#57 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:16,004 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/59ac3f64665a4baeae9c550191a164e5 is 50, key is test_row_0/C:col10/1732368075162/Put/seqid=0 2024-11-23T13:21:16,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741896_1072 (size=12865) 2024-11-23T13:21:16,059 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:16,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1732368136058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:16,060 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:16,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52684 deadline: 1732368136059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:16,060 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:16,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52652 deadline: 1732368136060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:16,062 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:16,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52702 deadline: 1732368136061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:16,121 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:16,121 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-23T13:21:16,121 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:16,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. as already flushing 2024-11-23T13:21:16,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:16,122 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
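The RegionTooBusyException warnings above ("Over memstore limit=512.0 K") are the server pushing back on writers while the memstore drains. Below is a minimal, hypothetical sketch of a caller backing off and retrying such a put; the table name, column family, and retry parameters are assumptions, and in practice the stock HBase client already retries these exceptions internally.

// Hypothetical client-side backoff on RegionTooBusyException; names and retry
// limits are illustrative only.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      long backoffMs = 100;
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);
          break; // write accepted
        } catch (RegionTooBusyException busy) {
          // Memstore is above its blocking limit; wait for flushes to catch up, then retry.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}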
2024-11-23T13:21:16,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:16,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:16,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-23T13:21:16,265 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:16,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1732368136265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:16,266 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:16,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52684 deadline: 1732368136266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:16,268 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:16,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52652 deadline: 1732368136265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:16,271 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:16,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52702 deadline: 1732368136269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:16,274 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:16,274 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-23T13:21:16,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:16,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. as already flushing 2024-11-23T13:21:16,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:16,275 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:16,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:16,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:16,347 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/0cb4da06a623471fb3be936bcc6d35d3 2024-11-23T13:21:16,373 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/9c20c904a3504caa8aa7360157d57370 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/9c20c904a3504caa8aa7360157d57370 2024-11-23T13:21:16,375 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/8b217a7f0fac4853bd65a25c396fec4c is 50, key is test_row_0/B:col10/1732368075811/Put/seqid=0 2024-11-23T13:21:16,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741897_1073 (size=12301) 2024-11-23T13:21:16,390 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 519df349e6147d27e7c8246089c4409f/B of 519df349e6147d27e7c8246089c4409f into 9c20c904a3504caa8aa7360157d57370(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T13:21:16,390 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:16,390 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f., storeName=519df349e6147d27e7c8246089c4409f/B, priority=13, startTime=1732368075913; duration=0sec 2024-11-23T13:21:16,391 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:16,391 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 519df349e6147d27e7c8246089c4409f:B 2024-11-23T13:21:16,417 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/59ac3f64665a4baeae9c550191a164e5 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/59ac3f64665a4baeae9c550191a164e5 2024-11-23T13:21:16,427 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 519df349e6147d27e7c8246089c4409f/C of 519df349e6147d27e7c8246089c4409f into 59ac3f64665a4baeae9c550191a164e5(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:21:16,427 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:16,427 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f., storeName=519df349e6147d27e7c8246089c4409f/C, priority=13, startTime=1732368075914; duration=0sec 2024-11-23T13:21:16,427 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:16,427 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 519df349e6147d27e7c8246089c4409f:C 2024-11-23T13:21:16,429 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:16,429 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-23T13:21:16,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:16,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 
as already flushing 2024-11-23T13:21:16,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:16,430 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:16,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:16,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:16,568 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:16,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1732368136568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:16,569 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:16,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52684 deadline: 1732368136568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:16,572 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:16,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52652 deadline: 1732368136570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:16,573 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:16,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52702 deadline: 1732368136573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:16,583 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:16,583 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-23T13:21:16,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:16,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. as already flushing 2024-11-23T13:21:16,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:16,584 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
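The 512.0 K blocking limit in these warnings is the region's memstore flush size multiplied by the block multiplier; the test evidently runs with a very small flush size so that writers hit the limit quickly. A hedged sketch of the two settings involved follows; the concrete values are illustrative (128 KB * 4 happens to equal the 512 KB reported here) and are not taken from the test's actual configuration.

// The blocking limit behind "Over memstore limit" is
// hbase.hregion.memstore.flush.size * hbase.hregion.memstore.block.multiplier.
// Values below are illustrative assumptions only.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L); // flush at 128 KB
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // block writes at 4x

    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
    System.out.println("blocking limit bytes: " + blockingLimit); // 524288 = 512.0 K
  }
}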
2024-11-23T13:21:16,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:16,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:16,739 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:16,739 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-23T13:21:16,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:16,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. as already flushing 2024-11-23T13:21:16,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:16,740 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:16,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:16,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:21:16,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-23T13:21:16,789 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/8b217a7f0fac4853bd65a25c396fec4c 2024-11-23T13:21:16,802 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/0363a37d34d2411b8f4d8a9905f21b2c is 50, key is test_row_0/C:col10/1732368075811/Put/seqid=0 2024-11-23T13:21:16,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741898_1074 (size=12301) 2024-11-23T13:21:16,809 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/0363a37d34d2411b8f4d8a9905f21b2c 2024-11-23T13:21:16,817 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/0cb4da06a623471fb3be936bcc6d35d3 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/0cb4da06a623471fb3be936bcc6d35d3 2024-11-23T13:21:16,828 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/0cb4da06a623471fb3be936bcc6d35d3, entries=150, sequenceid=288, filesize=12.0 K 2024-11-23T13:21:16,831 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/8b217a7f0fac4853bd65a25c396fec4c as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/8b217a7f0fac4853bd65a25c396fec4c 2024-11-23T13:21:16,839 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/8b217a7f0fac4853bd65a25c396fec4c, entries=150, sequenceid=288, filesize=12.0 K 2024-11-23T13:21:16,840 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/0363a37d34d2411b8f4d8a9905f21b2c as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/0363a37d34d2411b8f4d8a9905f21b2c 2024-11-23T13:21:16,849 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/0363a37d34d2411b8f4d8a9905f21b2c, entries=150, sequenceid=288, filesize=12.0 K 2024-11-23T13:21:16,851 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 519df349e6147d27e7c8246089c4409f in 920ms, sequenceid=288, compaction requested=false 2024-11-23T13:21:16,851 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:16,894 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:16,895 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-23T13:21:16,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:16,896 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2837): Flushing 519df349e6147d27e7c8246089c4409f 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-23T13:21:16,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=A 2024-11-23T13:21:16,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:16,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=B 2024-11-23T13:21:16,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:16,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=C 2024-11-23T13:21:16,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:16,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/706bfba7433e4161b86d69f575e58016 is 50, key is test_row_1/A:col10/1732368075953/Put/seqid=0 2024-11-23T13:21:16,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741899_1075 (size=9857) 2024-11-23T13:21:16,909 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 
{event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=301 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/706bfba7433e4161b86d69f575e58016 2024-11-23T13:21:16,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/66e974281b0342bb875519fee24a85f3 is 50, key is test_row_1/B:col10/1732368075953/Put/seqid=0 2024-11-23T13:21:16,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741900_1076 (size=9857) 2024-11-23T13:21:17,078 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. as already flushing 2024-11-23T13:21:17,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 519df349e6147d27e7c8246089c4409f 2024-11-23T13:21:17,108 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:17,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1732368137103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:17,109 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:17,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52684 deadline: 1732368137105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:17,111 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:17,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52702 deadline: 1732368137108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:17,112 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:17,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52652 deadline: 1732368137108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:17,212 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:17,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1732368137210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:17,213 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:17,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52684 deadline: 1732368137210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:17,213 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:17,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52702 deadline: 1732368137212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:17,219 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:17,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52652 deadline: 1732368137217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:17,330 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=301 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/66e974281b0342bb875519fee24a85f3 2024-11-23T13:21:17,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/3b9f4095c481432d995ea9998b18a7ba is 50, key is test_row_1/C:col10/1732368075953/Put/seqid=0 2024-11-23T13:21:17,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741901_1077 (size=9857) 2024-11-23T13:21:17,379 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=301 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/3b9f4095c481432d995ea9998b18a7ba 2024-11-23T13:21:17,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/706bfba7433e4161b86d69f575e58016 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/706bfba7433e4161b86d69f575e58016 2024-11-23T13:21:17,393 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/706bfba7433e4161b86d69f575e58016, entries=100, sequenceid=301, filesize=9.6 K 2024-11-23T13:21:17,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/66e974281b0342bb875519fee24a85f3 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/66e974281b0342bb875519fee24a85f3 2024-11-23T13:21:17,402 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/66e974281b0342bb875519fee24a85f3, entries=100, sequenceid=301, filesize=9.6 K 2024-11-23T13:21:17,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/3b9f4095c481432d995ea9998b18a7ba as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/3b9f4095c481432d995ea9998b18a7ba 2024-11-23T13:21:17,410 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/3b9f4095c481432d995ea9998b18a7ba, entries=100, sequenceid=301, filesize=9.6 K 2024-11-23T13:21:17,412 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 519df349e6147d27e7c8246089c4409f in 515ms, sequenceid=301, compaction requested=true 2024-11-23T13:21:17,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:17,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 
2024-11-23T13:21:17,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-11-23T13:21:17,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-11-23T13:21:17,418 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 519df349e6147d27e7c8246089c4409f 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-23T13:21:17,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 519df349e6147d27e7c8246089c4409f 2024-11-23T13:21:17,418 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=18 2024-11-23T13:21:17,418 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=A 2024-11-23T13:21:17,418 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7530 sec 2024-11-23T13:21:17,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:17,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=B 2024-11-23T13:21:17,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:17,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=C 2024-11-23T13:21:17,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:17,423 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees in 1.7650 sec 2024-11-23T13:21:17,427 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:17,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52702 deadline: 1732368137425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:17,428 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:17,428 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/0336d22c97144cdeb572b62142285625 is 50, key is test_row_0/A:col10/1732368077107/Put/seqid=0 2024-11-23T13:21:17,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1732368137425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:17,431 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:17,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52652 deadline: 1732368137427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:17,431 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:17,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52684 deadline: 1732368137427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:17,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741902_1078 (size=12301) 2024-11-23T13:21:17,447 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/0336d22c97144cdeb572b62142285625 2024-11-23T13:21:17,458 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/1bc9e437222f454ab8984f18bce7f4ad is 50, key is test_row_0/B:col10/1732368077107/Put/seqid=0 2024-11-23T13:21:17,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741903_1079 (size=12301) 2024-11-23T13:21:17,474 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/1bc9e437222f454ab8984f18bce7f4ad 2024-11-23T13:21:17,488 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/d59a21c3de2e44089a72ffb745ac554c is 50, key is test_row_0/C:col10/1732368077107/Put/seqid=0 2024-11-23T13:21:17,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741904_1080 (size=12301) 2024-11-23T13:21:17,512 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/d59a21c3de2e44089a72ffb745ac554c 2024-11-23T13:21:17,522 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/0336d22c97144cdeb572b62142285625 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/0336d22c97144cdeb572b62142285625 2024-11-23T13:21:17,530 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/0336d22c97144cdeb572b62142285625, entries=150, sequenceid=329, filesize=12.0 K 2024-11-23T13:21:17,530 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:17,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52702 deadline: 1732368137529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:17,531 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:17,532 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/1bc9e437222f454ab8984f18bce7f4ad as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/1bc9e437222f454ab8984f18bce7f4ad 2024-11-23T13:21:17,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1732368137529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:17,535 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:17,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52652 deadline: 1732368137533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:17,535 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:17,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52684 deadline: 1732368137534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:17,541 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/1bc9e437222f454ab8984f18bce7f4ad, entries=150, sequenceid=329, filesize=12.0 K 2024-11-23T13:21:17,542 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/d59a21c3de2e44089a72ffb745ac554c as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/d59a21c3de2e44089a72ffb745ac554c 2024-11-23T13:21:17,549 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/d59a21c3de2e44089a72ffb745ac554c, entries=150, sequenceid=329, filesize=12.0 K 2024-11-23T13:21:17,550 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(992): StoreScanner already has the close lock. 
There is no need to updateReaders
2024-11-23T13:21:17,551 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for 519df349e6147d27e7c8246089c4409f in 133ms, sequenceid=329, compaction requested=true
2024-11-23T13:21:17,551 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 519df349e6147d27e7c8246089c4409f:
2024-11-23T13:21:17,551 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 519df349e6147d27e7c8246089c4409f:A, priority=-2147483648, current under compaction store size is 1
2024-11-23T13:21:17,551 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-23T13:21:17,551 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking
2024-11-23T13:21:17,551 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking
2024-11-23T13:21:17,552 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 519df349e6147d27e7c8246089c4409f:B, priority=-2147483648, current under compaction store size is 2
2024-11-23T13:21:17,552 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-23T13:21:17,552 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 519df349e6147d27e7c8246089c4409f:C, priority=-2147483648, current under compaction store size is 3
2024-11-23T13:21:17,552 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-23T13:21:17,554 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47324 starting at candidate #0 after considering 3 permutations with 3 in ratio
2024-11-23T13:21:17,554 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 519df349e6147d27e7c8246089c4409f/B is initiating minor compaction (all files)
2024-11-23T13:21:17,554 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 519df349e6147d27e7c8246089c4409f/B in TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.
2024-11-23T13:21:17,555 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/9c20c904a3504caa8aa7360157d57370, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/8b217a7f0fac4853bd65a25c396fec4c, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/66e974281b0342bb875519fee24a85f3, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/1bc9e437222f454ab8984f18bce7f4ad] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp, totalSize=46.2 K 2024-11-23T13:21:17,555 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47324 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T13:21:17,555 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 9c20c904a3504caa8aa7360157d57370, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1732368075162 2024-11-23T13:21:17,555 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): 519df349e6147d27e7c8246089c4409f/A is initiating minor compaction (all files) 2024-11-23T13:21:17,555 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 519df349e6147d27e7c8246089c4409f/A in TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 
2024-11-23T13:21:17,556 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/a298c9d1130c4112976ef3cfd792a52d, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/0cb4da06a623471fb3be936bcc6d35d3, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/706bfba7433e4161b86d69f575e58016, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/0336d22c97144cdeb572b62142285625] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp, totalSize=46.2 K 2024-11-23T13:21:17,556 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 8b217a7f0fac4853bd65a25c396fec4c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732368075808 2024-11-23T13:21:17,556 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting a298c9d1130c4112976ef3cfd792a52d, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1732368075162 2024-11-23T13:21:17,557 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0cb4da06a623471fb3be936bcc6d35d3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732368075808 2024-11-23T13:21:17,557 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 706bfba7433e4161b86d69f575e58016, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=301, earliestPutTs=1732368075949 2024-11-23T13:21:17,558 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 66e974281b0342bb875519fee24a85f3, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=301, earliestPutTs=1732368075949 2024-11-23T13:21:17,559 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0336d22c97144cdeb572b62142285625, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1732368077093 2024-11-23T13:21:17,559 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 1bc9e437222f454ab8984f18bce7f4ad, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1732368077093 2024-11-23T13:21:17,579 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 519df349e6147d27e7c8246089c4409f#B#compaction#66 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:17,579 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 519df349e6147d27e7c8246089c4409f#A#compaction#67 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:17,580 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/f990998c1c7a4d538a106f0f7df56e67 is 50, key is test_row_0/B:col10/1732368077107/Put/seqid=0 2024-11-23T13:21:17,580 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/867fcee249c74181912408d650e9ac58 is 50, key is test_row_0/A:col10/1732368077107/Put/seqid=0 2024-11-23T13:21:17,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 519df349e6147d27e7c8246089c4409f 2024-11-23T13:21:17,608 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 519df349e6147d27e7c8246089c4409f 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-23T13:21:17,608 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=A 2024-11-23T13:21:17,609 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:17,609 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=B 2024-11-23T13:21:17,609 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:17,609 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=C 2024-11-23T13:21:17,609 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:17,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741906_1082 (size=13051) 2024-11-23T13:21:17,623 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/1653cbfd2e4543b6bfbc23ece82e0847 is 50, key is test_row_0/A:col10/1732368077602/Put/seqid=0 2024-11-23T13:21:17,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741905_1081 (size=13051) 2024-11-23T13:21:17,634 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/867fcee249c74181912408d650e9ac58 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/867fcee249c74181912408d650e9ac58 2024-11-23T13:21:17,636 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/f990998c1c7a4d538a106f0f7df56e67 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/f990998c1c7a4d538a106f0f7df56e67 2024-11-23T13:21:17,644 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 519df349e6147d27e7c8246089c4409f/A of 519df349e6147d27e7c8246089c4409f into 867fcee249c74181912408d650e9ac58(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:21:17,644 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:17,644 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f., storeName=519df349e6147d27e7c8246089c4409f/A, priority=12, startTime=1732368077551; duration=0sec 2024-11-23T13:21:17,644 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:21:17,644 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 519df349e6147d27e7c8246089c4409f:A 2024-11-23T13:21:17,644 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T13:21:17,646 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47324 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T13:21:17,646 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): 519df349e6147d27e7c8246089c4409f/C is initiating minor compaction (all files) 2024-11-23T13:21:17,646 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 519df349e6147d27e7c8246089c4409f/C in TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 
2024-11-23T13:21:17,646 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/59ac3f64665a4baeae9c550191a164e5, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/0363a37d34d2411b8f4d8a9905f21b2c, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/3b9f4095c481432d995ea9998b18a7ba, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/d59a21c3de2e44089a72ffb745ac554c] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp, totalSize=46.2 K 2024-11-23T13:21:17,647 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 59ac3f64665a4baeae9c550191a164e5, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1732368075162 2024-11-23T13:21:17,648 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0363a37d34d2411b8f4d8a9905f21b2c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732368075808 2024-11-23T13:21:17,649 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 519df349e6147d27e7c8246089c4409f/B of 519df349e6147d27e7c8246089c4409f into f990998c1c7a4d538a106f0f7df56e67(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T13:21:17,649 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3b9f4095c481432d995ea9998b18a7ba, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=301, earliestPutTs=1732368075949 2024-11-23T13:21:17,649 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:17,649 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f., storeName=519df349e6147d27e7c8246089c4409f/B, priority=12, startTime=1732368077551; duration=0sec 2024-11-23T13:21:17,649 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:17,649 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 519df349e6147d27e7c8246089c4409f:B 2024-11-23T13:21:17,650 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting d59a21c3de2e44089a72ffb745ac554c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1732368077093 2024-11-23T13:21:17,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741907_1083 (size=14741) 2024-11-23T13:21:17,670 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=340 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/1653cbfd2e4543b6bfbc23ece82e0847 2024-11-23T13:21:17,674 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 519df349e6147d27e7c8246089c4409f#C#compaction#69 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:17,675 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/e527817d9f55437aa8595b72860d810c is 50, key is test_row_0/C:col10/1732368077107/Put/seqid=0 2024-11-23T13:21:17,689 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/8034e0d797e44ebb891e299c623acab8 is 50, key is test_row_0/B:col10/1732368077602/Put/seqid=0 2024-11-23T13:21:17,713 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:17,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368137713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:17,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741908_1084 (size=13051) 2024-11-23T13:21:17,732 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:17,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52702 deadline: 1732368137732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:17,736 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:17,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1732368137734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:17,739 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:17,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52652 deadline: 1732368137736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:17,739 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:17,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52684 deadline: 1732368137737, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:17,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741909_1085 (size=12301) 2024-11-23T13:21:17,742 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=340 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/8034e0d797e44ebb891e299c623acab8 2024-11-23T13:21:17,755 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/c9581b0eeb934015b67cd7177f21aae2 is 50, key is test_row_0/C:col10/1732368077602/Put/seqid=0 2024-11-23T13:21:17,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-23T13:21:17,764 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 18 completed 2024-11-23T13:21:17,767 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T13:21:17,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees 2024-11-23T13:21:17,769 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T13:21:17,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-23T13:21:17,771 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T13:21:17,771 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 
2024-11-23T13:21:17,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741910_1086 (size=12301) 2024-11-23T13:21:17,816 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:17,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368137815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:17,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-23T13:21:17,924 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:17,925 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-23T13:21:17,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:17,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. as already flushing 2024-11-23T13:21:17,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 
2024-11-23T13:21:17,925 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:17,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:17,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:18,018 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:18,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368138018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:18,037 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:18,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52702 deadline: 1732368138037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:18,040 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:18,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1732368138039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:18,041 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:18,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52652 deadline: 1732368138041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:18,043 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:18,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52684 deadline: 1732368138043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:18,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-23T13:21:18,078 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:18,079 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-23T13:21:18,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:18,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. as already flushing 2024-11-23T13:21:18,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:18,079 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:18,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:18,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:18,129 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/e527817d9f55437aa8595b72860d810c as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/e527817d9f55437aa8595b72860d810c 2024-11-23T13:21:18,139 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 519df349e6147d27e7c8246089c4409f/C of 519df349e6147d27e7c8246089c4409f into e527817d9f55437aa8595b72860d810c(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T13:21:18,139 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:18,139 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f., storeName=519df349e6147d27e7c8246089c4409f/C, priority=12, startTime=1732368077552; duration=0sec 2024-11-23T13:21:18,139 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:18,139 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 519df349e6147d27e7c8246089c4409f:C 2024-11-23T13:21:18,180 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=340 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/c9581b0eeb934015b67cd7177f21aae2 2024-11-23T13:21:18,188 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/1653cbfd2e4543b6bfbc23ece82e0847 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/1653cbfd2e4543b6bfbc23ece82e0847 2024-11-23T13:21:18,200 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/1653cbfd2e4543b6bfbc23ece82e0847, entries=200, sequenceid=340, filesize=14.4 K 2024-11-23T13:21:18,202 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/8034e0d797e44ebb891e299c623acab8 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/8034e0d797e44ebb891e299c623acab8 2024-11-23T13:21:18,210 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/8034e0d797e44ebb891e299c623acab8, entries=150, sequenceid=340, filesize=12.0 K 2024-11-23T13:21:18,211 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/c9581b0eeb934015b67cd7177f21aae2 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/c9581b0eeb934015b67cd7177f21aae2 2024-11-23T13:21:18,216 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/c9581b0eeb934015b67cd7177f21aae2, entries=150, sequenceid=340, filesize=12.0 K 2024-11-23T13:21:18,217 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 519df349e6147d27e7c8246089c4409f in 609ms, sequenceid=340, compaction requested=false 2024-11-23T13:21:18,217 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:18,233 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:18,234 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-23T13:21:18,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:18,234 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing 519df349e6147d27e7c8246089c4409f 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-23T13:21:18,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=A 2024-11-23T13:21:18,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:18,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=B 2024-11-23T13:21:18,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:18,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=C 2024-11-23T13:21:18,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:18,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/22d550f675ad49899dcd196d09b8048e is 50, key is test_row_0/A:col10/1732368077706/Put/seqid=0 2024-11-23T13:21:18,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741911_1087 (size=12301) 2024-11-23T13:21:18,324 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing 
TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. as already flushing 2024-11-23T13:21:18,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 519df349e6147d27e7c8246089c4409f 2024-11-23T13:21:18,341 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:18,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368138341, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:18,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-23T13:21:18,445 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:18,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368138444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:18,544 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:18,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52702 deadline: 1732368138542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:18,547 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:18,547 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:18,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1732368138546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:18,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52652 deadline: 1732368138547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:18,549 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:18,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52684 deadline: 1732368138549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:18,649 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=368 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/22d550f675ad49899dcd196d09b8048e 2024-11-23T13:21:18,650 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:18,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368138648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:18,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/e6d7b56e3baa4730b452410f4d160b26 is 50, key is test_row_0/B:col10/1732368077706/Put/seqid=0 2024-11-23T13:21:18,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741912_1088 (size=12301) 2024-11-23T13:21:18,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-23T13:21:18,953 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:18,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368138952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:19,090 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=368 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/e6d7b56e3baa4730b452410f4d160b26 2024-11-23T13:21:19,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/aa3bd63b9ede4aa089f75dcaec4d431e is 50, key is test_row_0/C:col10/1732368077706/Put/seqid=0 2024-11-23T13:21:19,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741913_1089 (size=12301) 2024-11-23T13:21:19,109 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=368 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/aa3bd63b9ede4aa089f75dcaec4d431e 2024-11-23T13:21:19,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/22d550f675ad49899dcd196d09b8048e as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/22d550f675ad49899dcd196d09b8048e 2024-11-23T13:21:19,121 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/22d550f675ad49899dcd196d09b8048e, entries=150, sequenceid=368, filesize=12.0 K 2024-11-23T13:21:19,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/e6d7b56e3baa4730b452410f4d160b26 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/e6d7b56e3baa4730b452410f4d160b26 2024-11-23T13:21:19,129 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/e6d7b56e3baa4730b452410f4d160b26, entries=150, sequenceid=368, filesize=12.0 K 2024-11-23T13:21:19,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/aa3bd63b9ede4aa089f75dcaec4d431e as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/aa3bd63b9ede4aa089f75dcaec4d431e 2024-11-23T13:21:19,143 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/aa3bd63b9ede4aa089f75dcaec4d431e, entries=150, sequenceid=368, filesize=12.0 K 2024-11-23T13:21:19,145 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 519df349e6147d27e7c8246089c4409f in 910ms, sequenceid=368, compaction requested=true 2024-11-23T13:21:19,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:19,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 
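The repeated RegionTooBusyException warnings above show the region server rejecting Mutate calls while region 519df349e6147d27e7c8246089c4409f is over its 512.0 K memstore blocking limit; writers can only back off until the pid=21 flush drains the memstore. A minimal client-side sketch of that back-off, assuming an ordinary HBase client connection and assuming the client's built-in retries (hbase.client.retries.number) are dialed down so the exception actually reaches application code; the table, row and column names come from the log, the retry budget is purely illustrative:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetryExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"))
                    .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                putWithBackoff(table, put, 5, 200L);
            }
        }

        // Retry a put when the region reports it is over its memstore blocking limit.
        static void putWithBackoff(Table table, Put put, int maxAttempts, long initialSleepMs)
                throws IOException, InterruptedException {
            long sleepMs = initialSleepMs;
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put);
                    return;
                } catch (RegionTooBusyException e) {
                    // Same condition as the "Over memstore limit=512.0 K" warnings in the log.
                    if (attempt >= maxAttempts) {
                        throw e;
                    }
                    Thread.sleep(sleepMs);
                    sleepMs *= 2; // exponential back-off while the flush drains the memstore
                }
            }
        }
    }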
2024-11-23T13:21:19,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-11-23T13:21:19,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-11-23T13:21:19,149 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20 2024-11-23T13:21:19,149 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3750 sec 2024-11-23T13:21:19,154 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees in 1.3840 sec 2024-11-23T13:21:19,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 519df349e6147d27e7c8246089c4409f 2024-11-23T13:21:19,460 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 519df349e6147d27e7c8246089c4409f 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-23T13:21:19,460 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=A 2024-11-23T13:21:19,460 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:19,461 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=B 2024-11-23T13:21:19,461 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:19,461 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=C 2024-11-23T13:21:19,461 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:19,466 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/93fb3ccbf502425a970bba4895e220c9 is 50, key is test_row_0/A:col10/1732368079459/Put/seqid=0 2024-11-23T13:21:19,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741914_1090 (size=9857) 2024-11-23T13:21:19,478 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=380 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/93fb3ccbf502425a970bba4895e220c9 2024-11-23T13:21:19,489 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/cf70377c0ef845008f0b730493227f62 is 50, key is test_row_0/B:col10/1732368079459/Put/seqid=0 2024-11-23T13:21:19,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741915_1091 
(size=9857) 2024-11-23T13:21:19,505 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=380 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/cf70377c0ef845008f0b730493227f62 2024-11-23T13:21:19,531 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/3a4b327b386c43ba843cbf5008d5909b is 50, key is test_row_0/C:col10/1732368079459/Put/seqid=0 2024-11-23T13:21:19,549 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:19,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368139548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:19,555 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:19,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52702 deadline: 1732368139551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:19,556 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:19,556 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:19,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52652 deadline: 1732368139552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:19,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1732368139553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:19,556 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:19,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52684 deadline: 1732368139552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:19,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741916_1092 (size=9857) 2024-11-23T13:21:19,571 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=380 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/3a4b327b386c43ba843cbf5008d5909b 2024-11-23T13:21:19,579 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/93fb3ccbf502425a970bba4895e220c9 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/93fb3ccbf502425a970bba4895e220c9 2024-11-23T13:21:19,586 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/93fb3ccbf502425a970bba4895e220c9, entries=100, sequenceid=380, filesize=9.6 K 2024-11-23T13:21:19,587 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/cf70377c0ef845008f0b730493227f62 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/cf70377c0ef845008f0b730493227f62 2024-11-23T13:21:19,597 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/cf70377c0ef845008f0b730493227f62, entries=100, sequenceid=380, filesize=9.6 K 2024-11-23T13:21:19,600 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/3a4b327b386c43ba843cbf5008d5909b as 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/3a4b327b386c43ba843cbf5008d5909b 2024-11-23T13:21:19,609 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/3a4b327b386c43ba843cbf5008d5909b, entries=100, sequenceid=380, filesize=9.6 K 2024-11-23T13:21:19,610 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 519df349e6147d27e7c8246089c4409f in 150ms, sequenceid=380, compaction requested=true 2024-11-23T13:21:19,610 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:19,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 519df349e6147d27e7c8246089c4409f:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T13:21:19,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:19,611 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T13:21:19,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 519df349e6147d27e7c8246089c4409f:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T13:21:19,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:19,611 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T13:21:19,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 519df349e6147d27e7c8246089c4409f:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T13:21:19,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:21:19,615 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47510 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T13:21:19,615 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 519df349e6147d27e7c8246089c4409f/B is initiating minor compaction (all files) 2024-11-23T13:21:19,615 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 519df349e6147d27e7c8246089c4409f/B in TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 
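The 512.0 K figure in the RegionTooBusyException messages is the per-region blocking limit, i.e. hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The test's actual configuration is not shown in this log, so the values below are only an assumption chosen to reproduce a 512 KB limit (128 KB x 4); the configuration keys themselves are standard HBase settings:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SmallMemstoreLimitExample {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Flush a region's memstore once it holds 128 KB ...
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
            // ... and block writes with RegionTooBusyException once it reaches
            // flush.size * multiplier = 512 KB, matching "Over memstore limit=512.0 K".
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
            System.out.println("blocking limit = " + blockingLimit + " bytes");
        }
    }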
2024-11-23T13:21:19,615 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/f990998c1c7a4d538a106f0f7df56e67, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/8034e0d797e44ebb891e299c623acab8, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/e6d7b56e3baa4730b452410f4d160b26, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/cf70377c0ef845008f0b730493227f62] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp, totalSize=46.4 K 2024-11-23T13:21:19,615 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49950 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T13:21:19,616 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): 519df349e6147d27e7c8246089c4409f/A is initiating minor compaction (all files) 2024-11-23T13:21:19,616 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 519df349e6147d27e7c8246089c4409f/A in TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:19,616 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/867fcee249c74181912408d650e9ac58, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/1653cbfd2e4543b6bfbc23ece82e0847, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/22d550f675ad49899dcd196d09b8048e, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/93fb3ccbf502425a970bba4895e220c9] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp, totalSize=48.8 K 2024-11-23T13:21:19,616 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting f990998c1c7a4d538a106f0f7df56e67, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1732368077093 2024-11-23T13:21:19,617 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 867fcee249c74181912408d650e9ac58, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1732368077093 2024-11-23T13:21:19,617 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 8034e0d797e44ebb891e299c623acab8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=340, 
earliestPutTs=1732368077591 2024-11-23T13:21:19,617 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1653cbfd2e4543b6bfbc23ece82e0847, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=340, earliestPutTs=1732368077419 2024-11-23T13:21:19,617 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting e6d7b56e3baa4730b452410f4d160b26, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=368, earliestPutTs=1732368077699 2024-11-23T13:21:19,618 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 22d550f675ad49899dcd196d09b8048e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=368, earliestPutTs=1732368077699 2024-11-23T13:21:19,618 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting cf70377c0ef845008f0b730493227f62, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=380, earliestPutTs=1732368078339 2024-11-23T13:21:19,618 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 93fb3ccbf502425a970bba4895e220c9, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=380, earliestPutTs=1732368078339 2024-11-23T13:21:19,640 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 519df349e6147d27e7c8246089c4409f#A#compaction#78 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:19,641 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/92b2c48f4f1e4c9cae5eadbb1d296209 is 50, key is test_row_0/A:col10/1732368079459/Put/seqid=0 2024-11-23T13:21:19,642 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 519df349e6147d27e7c8246089c4409f#B#compaction#79 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:19,643 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/40aa7ff75a8e4208b7ac62ddf1655c9b is 50, key is test_row_0/B:col10/1732368079459/Put/seqid=0 2024-11-23T13:21:19,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 519df349e6147d27e7c8246089c4409f 2024-11-23T13:21:19,654 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 519df349e6147d27e7c8246089c4409f 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-23T13:21:19,655 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=A 2024-11-23T13:21:19,655 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:19,655 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=B 2024-11-23T13:21:19,655 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:19,655 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=C 2024-11-23T13:21:19,655 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:19,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741917_1093 (size=13187) 2024-11-23T13:21:19,672 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/3451f5e8f08a454ab5e7a93bf9aa77f3 is 50, key is test_row_0/A:col10/1732368079543/Put/seqid=0 2024-11-23T13:21:19,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741918_1094 (size=13187) 2024-11-23T13:21:19,684 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/92b2c48f4f1e4c9cae5eadbb1d296209 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/92b2c48f4f1e4c9cae5eadbb1d296209 2024-11-23T13:21:19,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741919_1095 (size=12301) 2024-11-23T13:21:19,692 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=405 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/3451f5e8f08a454ab5e7a93bf9aa77f3 2024-11-23T13:21:19,697 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/40aa7ff75a8e4208b7ac62ddf1655c9b as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/40aa7ff75a8e4208b7ac62ddf1655c9b 2024-11-23T13:21:19,702 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:19,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368139699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:19,710 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 519df349e6147d27e7c8246089c4409f/A of 519df349e6147d27e7c8246089c4409f into 92b2c48f4f1e4c9cae5eadbb1d296209(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
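The "Exploring compaction algorithm has selected 4 files of size 47510 ... with 3 in ratio" lines refer to a size-ratio test over the candidate store files. The following is a simplified, standalone illustration of that kind of ratio check, not HBase's actual ExploringCompactionPolicy code; the 1.2 ratio is the usual default for hbase.hstore.compaction.ratio, and the file sizes are assumed so that they sum to the 47510 bytes reported for the A-store selection:

    public class CompactionRatioCheckExample {
        // A selection passes the ratio test when every file is no larger than
        // ratio * (sum of the sizes of the other files in the selection).
        static boolean filesInRatio(long[] fileSizes, double ratio) {
            long total = 0;
            for (long size : fileSizes) {
                total += size;
            }
            for (long size : fileSizes) {
                if (size > (total - size) * ratio) {
                    return false;
                }
            }
            return true;
        }

        public static void main(String[] args) {
            // Assumed sizes (bytes) summing to 47510, mirroring the 12.7 K / 14.4 K-ish
            // / 12.0 K / 9.6 K files picked for store A above.
            long[] sizes = {13051, 12301, 12301, 9857};
            System.out.println(filesInRatio(sizes, 1.2)); // true: all files fit the ratio
        }
    }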
2024-11-23T13:21:19,711 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:19,711 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f., storeName=519df349e6147d27e7c8246089c4409f/A, priority=12, startTime=1732368079611; duration=0sec 2024-11-23T13:21:19,713 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/b7130e5e5a994202ae675c7246b0e760 is 50, key is test_row_0/B:col10/1732368079543/Put/seqid=0 2024-11-23T13:21:19,713 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 519df349e6147d27e7c8246089c4409f/B of 519df349e6147d27e7c8246089c4409f into 40aa7ff75a8e4208b7ac62ddf1655c9b(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:21:19,713 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:19,713 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f., storeName=519df349e6147d27e7c8246089c4409f/B, priority=12, startTime=1732368079611; duration=0sec 2024-11-23T13:21:19,714 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:21:19,714 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 519df349e6147d27e7c8246089c4409f:A 2024-11-23T13:21:19,714 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:19,714 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 519df349e6147d27e7c8246089c4409f:B 2024-11-23T13:21:19,714 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T13:21:19,718 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47510 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T13:21:19,718 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): 519df349e6147d27e7c8246089c4409f/C is initiating minor compaction (all files) 2024-11-23T13:21:19,718 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 519df349e6147d27e7c8246089c4409f/C in TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 
2024-11-23T13:21:19,718 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/e527817d9f55437aa8595b72860d810c, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/c9581b0eeb934015b67cd7177f21aae2, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/aa3bd63b9ede4aa089f75dcaec4d431e, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/3a4b327b386c43ba843cbf5008d5909b] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp, totalSize=46.4 K 2024-11-23T13:21:19,719 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting e527817d9f55437aa8595b72860d810c, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1732368077093 2024-11-23T13:21:19,719 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting c9581b0eeb934015b67cd7177f21aae2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=340, earliestPutTs=1732368077591 2024-11-23T13:21:19,720 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting aa3bd63b9ede4aa089f75dcaec4d431e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=368, earliestPutTs=1732368077699 2024-11-23T13:21:19,722 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3a4b327b386c43ba843cbf5008d5909b, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=380, earliestPutTs=1732368078339 2024-11-23T13:21:19,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741920_1096 (size=12301) 2024-11-23T13:21:19,724 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=405 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/b7130e5e5a994202ae675c7246b0e760 2024-11-23T13:21:19,750 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/2a11b0853aa84b8d9bb8829836444924 is 50, key is test_row_0/C:col10/1732368079543/Put/seqid=0 2024-11-23T13:21:19,753 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 519df349e6147d27e7c8246089c4409f#C#compaction#83 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:19,753 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/6350d9e6ad0a4b6fbb094983aa6d3425 is 50, key is test_row_0/C:col10/1732368079459/Put/seqid=0 2024-11-23T13:21:19,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741921_1097 (size=12301) 2024-11-23T13:21:19,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741922_1098 (size=13187) 2024-11-23T13:21:19,770 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=405 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/2a11b0853aa84b8d9bb8829836444924 2024-11-23T13:21:19,783 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/3451f5e8f08a454ab5e7a93bf9aa77f3 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/3451f5e8f08a454ab5e7a93bf9aa77f3 2024-11-23T13:21:19,791 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/3451f5e8f08a454ab5e7a93bf9aa77f3, entries=150, sequenceid=405, filesize=12.0 K 2024-11-23T13:21:19,796 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/b7130e5e5a994202ae675c7246b0e760 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/b7130e5e5a994202ae675c7246b0e760 2024-11-23T13:21:19,804 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/b7130e5e5a994202ae675c7246b0e760, entries=150, sequenceid=405, filesize=12.0 K 2024-11-23T13:21:19,806 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:19,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368139804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:19,807 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/2a11b0853aa84b8d9bb8829836444924 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/2a11b0853aa84b8d9bb8829836444924 2024-11-23T13:21:19,816 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/2a11b0853aa84b8d9bb8829836444924, entries=150, sequenceid=405, filesize=12.0 K 2024-11-23T13:21:19,817 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 519df349e6147d27e7c8246089c4409f in 163ms, sequenceid=405, compaction requested=false 2024-11-23T13:21:19,817 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:19,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-23T13:21:19,877 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 20 completed 2024-11-23T13:21:19,879 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T13:21:19,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees 2024-11-23T13:21:19,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-23T13:21:19,882 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute 
state=FLUSH_TABLE_PREPARE 2024-11-23T13:21:19,890 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T13:21:19,890 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T13:21:19,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-23T13:21:20,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 519df349e6147d27e7c8246089c4409f 2024-11-23T13:21:20,012 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 519df349e6147d27e7c8246089c4409f 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-23T13:21:20,012 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=A 2024-11-23T13:21:20,013 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:20,013 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=B 2024-11-23T13:21:20,013 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:20,013 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=C 2024-11-23T13:21:20,013 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:20,027 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/96268a4b6e654bca9cf778b462b94fe4 is 50, key is test_row_0/A:col10/1732368079669/Put/seqid=0 2024-11-23T13:21:20,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741923_1099 (size=14741) 2024-11-23T13:21:20,038 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=419 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/96268a4b6e654bca9cf778b462b94fe4 2024-11-23T13:21:20,043 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:20,043 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-23T13:21:20,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 
2024-11-23T13:21:20,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. as already flushing 2024-11-23T13:21:20,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:20,044 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:20,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:21:20,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:21:20,076 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/aa003a7ddb4e46aeaa55bfdd12679577 is 50, key is test_row_0/B:col10/1732368079669/Put/seqid=0 2024-11-23T13:21:20,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741924_1100 (size=12301) 2024-11-23T13:21:20,099 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=419 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/aa003a7ddb4e46aeaa55bfdd12679577 2024-11-23T13:21:20,115 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:20,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368140109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:20,123 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/f1f5ed0187064a67ad7bfc99a74cd996 is 50, key is test_row_0/C:col10/1732368079669/Put/seqid=0 2024-11-23T13:21:20,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741925_1101 (size=12301) 2024-11-23T13:21:20,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-23T13:21:20,186 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/6350d9e6ad0a4b6fbb094983aa6d3425 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/6350d9e6ad0a4b6fbb094983aa6d3425 2024-11-23T13:21:20,197 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:20,198 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-23T13:21:20,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:20,198 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 519df349e6147d27e7c8246089c4409f/C of 519df349e6147d27e7c8246089c4409f into 6350d9e6ad0a4b6fbb094983aa6d3425(size=12.9 K), total size for store is 24.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T13:21:20,198 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:20,198 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f., storeName=519df349e6147d27e7c8246089c4409f/C, priority=12, startTime=1732368079611; duration=0sec 2024-11-23T13:21:20,199 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:20,199 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 519df349e6147d27e7c8246089c4409f:C 2024-11-23T13:21:20,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. as already flushing 2024-11-23T13:21:20,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:20,200 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:20,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:20,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:20,218 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:20,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368140217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:20,353 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:20,353 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-23T13:21:20,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:20,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. as already flushing 2024-11-23T13:21:20,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:20,354 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:21:20,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:20,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:20,422 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:20,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368140420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:20,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-23T13:21:20,506 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:20,507 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-23T13:21:20,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:20,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. as already flushing 2024-11-23T13:21:20,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 
2024-11-23T13:21:20,508 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:20,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:20,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:20,543 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=419 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/f1f5ed0187064a67ad7bfc99a74cd996 2024-11-23T13:21:20,549 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/96268a4b6e654bca9cf778b462b94fe4 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/96268a4b6e654bca9cf778b462b94fe4 2024-11-23T13:21:20,555 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/96268a4b6e654bca9cf778b462b94fe4, entries=200, sequenceid=419, filesize=14.4 K 2024-11-23T13:21:20,556 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/aa003a7ddb4e46aeaa55bfdd12679577 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/aa003a7ddb4e46aeaa55bfdd12679577 2024-11-23T13:21:20,562 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/aa003a7ddb4e46aeaa55bfdd12679577, entries=150, 
sequenceid=419, filesize=12.0 K 2024-11-23T13:21:20,563 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/f1f5ed0187064a67ad7bfc99a74cd996 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/f1f5ed0187064a67ad7bfc99a74cd996 2024-11-23T13:21:20,569 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/f1f5ed0187064a67ad7bfc99a74cd996, entries=150, sequenceid=419, filesize=12.0 K 2024-11-23T13:21:20,570 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 519df349e6147d27e7c8246089c4409f in 558ms, sequenceid=419, compaction requested=true 2024-11-23T13:21:20,570 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:20,570 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 519df349e6147d27e7c8246089c4409f:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T13:21:20,570 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:21:20,570 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:20,570 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 519df349e6147d27e7c8246089c4409f:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T13:21:20,571 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:20,571 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 519df349e6147d27e7c8246089c4409f:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T13:21:20,571 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:21:20,571 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:21:20,572 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40229 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:21:20,572 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): 519df349e6147d27e7c8246089c4409f/A is initiating minor compaction (all files) 2024-11-23T13:21:20,572 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 519df349e6147d27e7c8246089c4409f/A in 
TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:20,572 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/92b2c48f4f1e4c9cae5eadbb1d296209, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/3451f5e8f08a454ab5e7a93bf9aa77f3, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/96268a4b6e654bca9cf778b462b94fe4] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp, totalSize=39.3 K 2024-11-23T13:21:20,573 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:21:20,573 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 519df349e6147d27e7c8246089c4409f/B is initiating minor compaction (all files) 2024-11-23T13:21:20,573 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 519df349e6147d27e7c8246089c4409f/B in TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:20,573 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/40aa7ff75a8e4208b7ac62ddf1655c9b, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/b7130e5e5a994202ae675c7246b0e760, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/aa003a7ddb4e46aeaa55bfdd12679577] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp, totalSize=36.9 K 2024-11-23T13:21:20,574 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 40aa7ff75a8e4208b7ac62ddf1655c9b, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=380, earliestPutTs=1732368077699 2024-11-23T13:21:20,574 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 92b2c48f4f1e4c9cae5eadbb1d296209, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=380, earliestPutTs=1732368077699 2024-11-23T13:21:20,574 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting b7130e5e5a994202ae675c7246b0e760, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=405, earliestPutTs=1732368079543 2024-11-23T13:21:20,574 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3451f5e8f08a454ab5e7a93bf9aa77f3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=405, earliestPutTs=1732368079543 2024-11-23T13:21:20,575 
DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 96268a4b6e654bca9cf778b462b94fe4, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=419, earliestPutTs=1732368079669 2024-11-23T13:21:20,575 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting aa003a7ddb4e46aeaa55bfdd12679577, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=419, earliestPutTs=1732368079669 2024-11-23T13:21:20,588 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 519df349e6147d27e7c8246089c4409f#A#compaction#87 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:20,589 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 519df349e6147d27e7c8246089c4409f#B#compaction#88 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:20,589 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/4fb9a51993f44b6084ccdf182faa25cb is 50, key is test_row_0/A:col10/1732368079669/Put/seqid=0 2024-11-23T13:21:20,590 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/8059ac14dd6045ddb9876b3bc0e43347 is 50, key is test_row_0/B:col10/1732368079669/Put/seqid=0 2024-11-23T13:21:20,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741926_1102 (size=13289) 2024-11-23T13:21:20,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741927_1103 (size=13289) 2024-11-23T13:21:20,617 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/8059ac14dd6045ddb9876b3bc0e43347 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/8059ac14dd6045ddb9876b3bc0e43347 2024-11-23T13:21:20,626 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 519df349e6147d27e7c8246089c4409f/B of 519df349e6147d27e7c8246089c4409f into 8059ac14dd6045ddb9876b3bc0e43347(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T13:21:20,626 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:20,626 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f., storeName=519df349e6147d27e7c8246089c4409f/B, priority=13, startTime=1732368080570; duration=0sec 2024-11-23T13:21:20,626 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:21:20,626 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 519df349e6147d27e7c8246089c4409f:B 2024-11-23T13:21:20,626 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:21:20,627 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:21:20,628 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 519df349e6147d27e7c8246089c4409f/C is initiating minor compaction (all files) 2024-11-23T13:21:20,628 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 519df349e6147d27e7c8246089c4409f/C in TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:20,628 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/6350d9e6ad0a4b6fbb094983aa6d3425, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/2a11b0853aa84b8d9bb8829836444924, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/f1f5ed0187064a67ad7bfc99a74cd996] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp, totalSize=36.9 K 2024-11-23T13:21:20,629 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 6350d9e6ad0a4b6fbb094983aa6d3425, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=380, earliestPutTs=1732368077699 2024-11-23T13:21:20,629 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 2a11b0853aa84b8d9bb8829836444924, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=405, earliestPutTs=1732368079543 2024-11-23T13:21:20,630 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting f1f5ed0187064a67ad7bfc99a74cd996, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=419, earliestPutTs=1732368079669 2024-11-23T13:21:20,642 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
519df349e6147d27e7c8246089c4409f#C#compaction#89 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:20,643 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/49443438957a4e2fb01fecd95fa9942c is 50, key is test_row_0/C:col10/1732368079669/Put/seqid=0 2024-11-23T13:21:20,660 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:20,661 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-23T13:21:20,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:20,661 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2837): Flushing 519df349e6147d27e7c8246089c4409f 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-23T13:21:20,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=A 2024-11-23T13:21:20,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:20,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=B 2024-11-23T13:21:20,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:20,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=C 2024-11-23T13:21:20,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:20,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/0046e7f3d090429ba6fecc266786cb98 is 50, key is test_row_0/A:col10/1732368080090/Put/seqid=0 2024-11-23T13:21:20,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741928_1104 (size=13289) 2024-11-23T13:21:20,674 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/49443438957a4e2fb01fecd95fa9942c as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/49443438957a4e2fb01fecd95fa9942c 2024-11-23T13:21:20,681 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 519df349e6147d27e7c8246089c4409f/C of 519df349e6147d27e7c8246089c4409f into 49443438957a4e2fb01fecd95fa9942c(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:21:20,681 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:20,681 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f., storeName=519df349e6147d27e7c8246089c4409f/C, priority=13, startTime=1732368080571; duration=0sec 2024-11-23T13:21:20,681 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:20,681 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 519df349e6147d27e7c8246089c4409f:C 2024-11-23T13:21:20,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741929_1105 (size=12301) 2024-11-23T13:21:20,698 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=445 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/0046e7f3d090429ba6fecc266786cb98 2024-11-23T13:21:20,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/50a500468a6f42fe9cff14bd6ac913bc is 50, key is test_row_0/B:col10/1732368080090/Put/seqid=0 2024-11-23T13:21:20,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741930_1106 (size=12301) 2024-11-23T13:21:20,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 519df349e6147d27e7c8246089c4409f 2024-11-23T13:21:20,727 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. as already flushing 2024-11-23T13:21:20,749 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:20,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 259 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368140747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:20,851 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:20,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 261 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368140851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:20,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-23T13:21:21,014 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/4fb9a51993f44b6084ccdf182faa25cb as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/4fb9a51993f44b6084ccdf182faa25cb 2024-11-23T13:21:21,025 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 519df349e6147d27e7c8246089c4409f/A of 519df349e6147d27e7c8246089c4409f into 4fb9a51993f44b6084ccdf182faa25cb(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:21:21,025 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:21,025 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f., storeName=519df349e6147d27e7c8246089c4409f/A, priority=13, startTime=1732368080570; duration=0sec 2024-11-23T13:21:21,026 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:21,026 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 519df349e6147d27e7c8246089c4409f:A 2024-11-23T13:21:21,053 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:21,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 263 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368141053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:21,121 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=445 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/50a500468a6f42fe9cff14bd6ac913bc 2024-11-23T13:21:21,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/3895c6dc78204a8285cf1e561ea237e1 is 50, key is test_row_0/C:col10/1732368080090/Put/seqid=0 2024-11-23T13:21:21,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741931_1107 (size=12301) 2024-11-23T13:21:21,138 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=445 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/3895c6dc78204a8285cf1e561ea237e1 2024-11-23T13:21:21,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/0046e7f3d090429ba6fecc266786cb98 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/0046e7f3d090429ba6fecc266786cb98 2024-11-23T13:21:21,151 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/0046e7f3d090429ba6fecc266786cb98, entries=150, sequenceid=445, filesize=12.0 K 2024-11-23T13:21:21,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/50a500468a6f42fe9cff14bd6ac913bc as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/50a500468a6f42fe9cff14bd6ac913bc 2024-11-23T13:21:21,158 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/50a500468a6f42fe9cff14bd6ac913bc, entries=150, sequenceid=445, filesize=12.0 K 2024-11-23T13:21:21,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/3895c6dc78204a8285cf1e561ea237e1 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/3895c6dc78204a8285cf1e561ea237e1 2024-11-23T13:21:21,164 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/3895c6dc78204a8285cf1e561ea237e1, entries=150, sequenceid=445, filesize=12.0 K 2024-11-23T13:21:21,166 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 519df349e6147d27e7c8246089c4409f in 504ms, sequenceid=445, compaction requested=false 2024-11-23T13:21:21,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2538): Flush status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:21,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 
2024-11-23T13:21:21,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=23 2024-11-23T13:21:21,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4106): Remote procedure done, pid=23 2024-11-23T13:21:21,168 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=23, resume processing ppid=22 2024-11-23T13:21:21,168 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, ppid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2770 sec 2024-11-23T13:21:21,170 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees in 1.2900 sec 2024-11-23T13:21:21,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 519df349e6147d27e7c8246089c4409f 2024-11-23T13:21:21,357 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 519df349e6147d27e7c8246089c4409f 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-23T13:21:21,357 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=A 2024-11-23T13:21:21,357 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:21,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=B 2024-11-23T13:21:21,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:21,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=C 2024-11-23T13:21:21,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:21,363 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/c538c76c9e1540f1996c23e7373f2ec2 is 50, key is test_row_0/A:col10/1732368081356/Put/seqid=0 2024-11-23T13:21:21,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741932_1108 (size=12301) 2024-11-23T13:21:21,379 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=460 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/c538c76c9e1540f1996c23e7373f2ec2 2024-11-23T13:21:21,392 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/5d8f3e3ac68744369a0b99aa87f63892 is 50, key is test_row_0/B:col10/1732368081356/Put/seqid=0 2024-11-23T13:21:21,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741933_1109 
(size=12301) 2024-11-23T13:21:21,414 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:21,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 286 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368141412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:21,516 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:21,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 288 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368141515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:21,563 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:21,563 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:21,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52684 deadline: 1732368141562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:21,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52702 deadline: 1732368141562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:21,564 DEBUG [Thread-153 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4140 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f., hostname=ba2e440802a7,33173,1732368061317, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T13:21:21,564 DEBUG [Thread-149 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4137 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, 
regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f., hostname=ba2e440802a7,33173,1732368061317, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) 
at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T13:21:21,566 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:21,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52652 deadline: 1732368141564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:21,567 DEBUG [Thread-155 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4139 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f., hostname=ba2e440802a7,33173,1732368061317, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T13:21:21,575 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:21,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1732368141573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:21,577 DEBUG [Thread-151 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4152 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f., hostname=ba2e440802a7,33173,1732368061317, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T13:21:21,719 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:21,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 290 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368141718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:21,798 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=460 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/5d8f3e3ac68744369a0b99aa87f63892 2024-11-23T13:21:21,808 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/ec674a71cebc4c57aa5a5bc971df3ba8 is 50, key is test_row_0/C:col10/1732368081356/Put/seqid=0 2024-11-23T13:21:21,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741934_1110 (size=12301) 2024-11-23T13:21:21,813 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=460 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/ec674a71cebc4c57aa5a5bc971df3ba8 2024-11-23T13:21:21,819 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/c538c76c9e1540f1996c23e7373f2ec2 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/c538c76c9e1540f1996c23e7373f2ec2 2024-11-23T13:21:21,826 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/c538c76c9e1540f1996c23e7373f2ec2, entries=150, sequenceid=460, filesize=12.0 K 2024-11-23T13:21:21,827 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/5d8f3e3ac68744369a0b99aa87f63892 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/5d8f3e3ac68744369a0b99aa87f63892 2024-11-23T13:21:21,832 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/5d8f3e3ac68744369a0b99aa87f63892, entries=150, sequenceid=460, filesize=12.0 K 2024-11-23T13:21:21,833 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/ec674a71cebc4c57aa5a5bc971df3ba8 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/ec674a71cebc4c57aa5a5bc971df3ba8 2024-11-23T13:21:21,838 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/ec674a71cebc4c57aa5a5bc971df3ba8, entries=150, sequenceid=460, filesize=12.0 K 2024-11-23T13:21:21,839 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 519df349e6147d27e7c8246089c4409f in 482ms, sequenceid=460, compaction requested=true 2024-11-23T13:21:21,839 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:21,839 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 519df349e6147d27e7c8246089c4409f:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T13:21:21,839 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:21,839 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:21:21,839 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 519df349e6147d27e7c8246089c4409f:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T13:21:21,839 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:21:21,839 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:21,839 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(403): Add compact mark for store 519df349e6147d27e7c8246089c4409f:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T13:21:21,839 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:21:21,840 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37891 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:21:21,840 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37891 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:21:21,840 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 519df349e6147d27e7c8246089c4409f/B is initiating minor compaction (all files) 2024-11-23T13:21:21,840 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): 519df349e6147d27e7c8246089c4409f/A is initiating minor compaction (all files) 2024-11-23T13:21:21,841 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 519df349e6147d27e7c8246089c4409f/B in TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:21,841 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 519df349e6147d27e7c8246089c4409f/A in TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:21,841 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/8059ac14dd6045ddb9876b3bc0e43347, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/50a500468a6f42fe9cff14bd6ac913bc, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/5d8f3e3ac68744369a0b99aa87f63892] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp, totalSize=37.0 K 2024-11-23T13:21:21,841 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/4fb9a51993f44b6084ccdf182faa25cb, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/0046e7f3d090429ba6fecc266786cb98, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/c538c76c9e1540f1996c23e7373f2ec2] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp, totalSize=37.0 K 2024-11-23T13:21:21,841 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 8059ac14dd6045ddb9876b3bc0e43347, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=419, earliestPutTs=1732368079669 2024-11-23T13:21:21,841 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4fb9a51993f44b6084ccdf182faa25cb, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=419, earliestPutTs=1732368079669 2024-11-23T13:21:21,842 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0046e7f3d090429ba6fecc266786cb98, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=445, earliestPutTs=1732368080090 2024-11-23T13:21:21,842 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 50a500468a6f42fe9cff14bd6ac913bc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=445, earliestPutTs=1732368080090 2024-11-23T13:21:21,842 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 5d8f3e3ac68744369a0b99aa87f63892, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=460, earliestPutTs=1732368080736 2024-11-23T13:21:21,842 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting c538c76c9e1540f1996c23e7373f2ec2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=460, earliestPutTs=1732368080736 2024-11-23T13:21:21,853 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 519df349e6147d27e7c8246089c4409f#B#compaction#96 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:21,854 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/0a72a36c3e424fb49a7b3c212654fe8c is 50, key is test_row_0/B:col10/1732368081356/Put/seqid=0 2024-11-23T13:21:21,860 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 519df349e6147d27e7c8246089c4409f#A#compaction#97 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:21,861 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/e53dcc542b894b9495f8f9f951170274 is 50, key is test_row_0/A:col10/1732368081356/Put/seqid=0 2024-11-23T13:21:21,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741936_1112 (size=13391) 2024-11-23T13:21:21,877 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/e53dcc542b894b9495f8f9f951170274 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/e53dcc542b894b9495f8f9f951170274 2024-11-23T13:21:21,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741935_1111 (size=13391) 2024-11-23T13:21:21,886 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 519df349e6147d27e7c8246089c4409f/A of 519df349e6147d27e7c8246089c4409f into e53dcc542b894b9495f8f9f951170274(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:21:21,886 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:21,886 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f., storeName=519df349e6147d27e7c8246089c4409f/A, priority=13, startTime=1732368081839; duration=0sec 2024-11-23T13:21:21,886 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:21:21,886 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 519df349e6147d27e7c8246089c4409f:A 2024-11-23T13:21:21,886 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:21:21,887 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/0a72a36c3e424fb49a7b3c212654fe8c as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/0a72a36c3e424fb49a7b3c212654fe8c 2024-11-23T13:21:21,888 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37891 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:21:21,888 DEBUG 
[RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): 519df349e6147d27e7c8246089c4409f/C is initiating minor compaction (all files) 2024-11-23T13:21:21,888 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 519df349e6147d27e7c8246089c4409f/C in TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:21,888 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/49443438957a4e2fb01fecd95fa9942c, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/3895c6dc78204a8285cf1e561ea237e1, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/ec674a71cebc4c57aa5a5bc971df3ba8] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp, totalSize=37.0 K 2024-11-23T13:21:21,888 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 49443438957a4e2fb01fecd95fa9942c, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=419, earliestPutTs=1732368079669 2024-11-23T13:21:21,889 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3895c6dc78204a8285cf1e561ea237e1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=445, earliestPutTs=1732368080090 2024-11-23T13:21:21,889 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting ec674a71cebc4c57aa5a5bc971df3ba8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=460, earliestPutTs=1732368080736 2024-11-23T13:21:21,893 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 519df349e6147d27e7c8246089c4409f/B of 519df349e6147d27e7c8246089c4409f into 0a72a36c3e424fb49a7b3c212654fe8c(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T13:21:21,893 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:21,893 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f., storeName=519df349e6147d27e7c8246089c4409f/B, priority=13, startTime=1732368081839; duration=0sec 2024-11-23T13:21:21,893 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:21,893 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 519df349e6147d27e7c8246089c4409f:B 2024-11-23T13:21:21,900 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 519df349e6147d27e7c8246089c4409f#C#compaction#98 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:21,901 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/cd9b2dbef82c4df897fd39f7a0b1cdd8 is 50, key is test_row_0/C:col10/1732368081356/Put/seqid=0 2024-11-23T13:21:21,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741937_1113 (size=13391) 2024-11-23T13:21:21,914 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/cd9b2dbef82c4df897fd39f7a0b1cdd8 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/cd9b2dbef82c4df897fd39f7a0b1cdd8 2024-11-23T13:21:21,921 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 519df349e6147d27e7c8246089c4409f/C of 519df349e6147d27e7c8246089c4409f into cd9b2dbef82c4df897fd39f7a0b1cdd8(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T13:21:21,921 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:21,921 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f., storeName=519df349e6147d27e7c8246089c4409f/C, priority=13, startTime=1732368081839; duration=0sec 2024-11-23T13:21:21,922 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:21,922 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 519df349e6147d27e7c8246089c4409f:C 2024-11-23T13:21:21,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-23T13:21:21,986 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 22 completed 2024-11-23T13:21:21,988 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T13:21:21,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees 2024-11-23T13:21:21,990 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T13:21:21,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-23T13:21:21,990 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T13:21:21,991 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T13:21:22,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 519df349e6147d27e7c8246089c4409f 2024-11-23T13:21:22,023 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 519df349e6147d27e7c8246089c4409f 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-23T13:21:22,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=A 2024-11-23T13:21:22,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:22,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=B 2024-11-23T13:21:22,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:22,024 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=C 2024-11-23T13:21:22,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:22,032 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/06283acd2e4e4bae81408ece19e5fc69 is 50, key is test_row_0/A:col10/1732368081407/Put/seqid=0 2024-11-23T13:21:22,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741938_1114 (size=14741) 2024-11-23T13:21:22,042 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=487 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/06283acd2e4e4bae81408ece19e5fc69 2024-11-23T13:21:22,049 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:22,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 302 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368142046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:22,051 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/b969eec4eb4b4475bceb74052af54764 is 50, key is test_row_0/B:col10/1732368081407/Put/seqid=0 2024-11-23T13:21:22,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741939_1115 (size=12301) 2024-11-23T13:21:22,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-23T13:21:22,142 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:22,143 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-23T13:21:22,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:22,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. as already flushing 2024-11-23T13:21:22,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:22,144 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:22,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:22,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:22,152 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:22,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 304 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368142151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:22,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-23T13:21:22,296 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:22,297 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-23T13:21:22,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:22,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. as already flushing 2024-11-23T13:21:22,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:22,297 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:21:22,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:22,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:22,354 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:22,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 306 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368142353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:22,450 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:22,450 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-23T13:21:22,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:22,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. as already flushing 2024-11-23T13:21:22,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 
2024-11-23T13:21:22,451 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:22,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:22,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:22,459 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=487 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/b969eec4eb4b4475bceb74052af54764 2024-11-23T13:21:22,469 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/225fa415b96f4029a3ec7e3fce7713e5 is 50, key is test_row_0/C:col10/1732368081407/Put/seqid=0 2024-11-23T13:21:22,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741940_1116 (size=12301) 2024-11-23T13:21:22,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-23T13:21:22,603 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:22,604 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-23T13:21:22,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:22,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 
as already flushing 2024-11-23T13:21:22,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:22,604 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:22,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:22,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:22,658 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:22,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 308 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368142656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:22,756 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:22,757 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-23T13:21:22,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:22,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. as already flushing 2024-11-23T13:21:22,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:22,757 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:21:22,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:22,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:22,880 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=487 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/225fa415b96f4029a3ec7e3fce7713e5 2024-11-23T13:21:22,886 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/06283acd2e4e4bae81408ece19e5fc69 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/06283acd2e4e4bae81408ece19e5fc69 2024-11-23T13:21:22,892 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/06283acd2e4e4bae81408ece19e5fc69, entries=200, sequenceid=487, filesize=14.4 K 2024-11-23T13:21:22,893 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/b969eec4eb4b4475bceb74052af54764 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/b969eec4eb4b4475bceb74052af54764 2024-11-23T13:21:22,898 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/b969eec4eb4b4475bceb74052af54764, entries=150, sequenceid=487, filesize=12.0 K 2024-11-23T13:21:22,900 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/225fa415b96f4029a3ec7e3fce7713e5 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/225fa415b96f4029a3ec7e3fce7713e5 2024-11-23T13:21:22,904 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/225fa415b96f4029a3ec7e3fce7713e5, entries=150, sequenceid=487, filesize=12.0 K 2024-11-23T13:21:22,905 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 519df349e6147d27e7c8246089c4409f in 882ms, sequenceid=487, compaction requested=false 2024-11-23T13:21:22,905 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:22,910 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 
ba2e440802a7,33173,1732368061317 2024-11-23T13:21:22,910 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-23T13:21:22,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:22,911 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2837): Flushing 519df349e6147d27e7c8246089c4409f 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-23T13:21:22,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=A 2024-11-23T13:21:22,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:22,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=B 2024-11-23T13:21:22,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:22,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=C 2024-11-23T13:21:22,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:22,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/6ed7a4aa0bac4fce9ff12dbfaeab8785 is 50, key is test_row_0/A:col10/1732368082036/Put/seqid=0 2024-11-23T13:21:22,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741941_1117 (size=12301) 2024-11-23T13:21:22,924 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=499 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/6ed7a4aa0bac4fce9ff12dbfaeab8785 2024-11-23T13:21:22,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/6e89716bda2649bd8fd0c5d2b39d9ac9 is 50, key is test_row_0/B:col10/1732368082036/Put/seqid=0 2024-11-23T13:21:22,937 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741942_1118 (size=12301) 2024-11-23T13:21:23,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-23T13:21:23,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 519df349e6147d27e7c8246089c4409f 2024-11-23T13:21:23,165 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. as already flushing 2024-11-23T13:21:23,213 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:23,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 331 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368143212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:23,316 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:23,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 333 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368143315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:23,338 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=499 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/6e89716bda2649bd8fd0c5d2b39d9ac9 2024-11-23T13:21:23,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/49f040bc467d413c9360fe3411983357 is 50, key is test_row_0/C:col10/1732368082036/Put/seqid=0 2024-11-23T13:21:23,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741943_1119 (size=12301) 2024-11-23T13:21:23,522 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:23,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 335 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368143522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:23,752 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=499 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/49f040bc467d413c9360fe3411983357 2024-11-23T13:21:23,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/6ed7a4aa0bac4fce9ff12dbfaeab8785 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/6ed7a4aa0bac4fce9ff12dbfaeab8785 2024-11-23T13:21:23,763 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/6ed7a4aa0bac4fce9ff12dbfaeab8785, entries=150, sequenceid=499, filesize=12.0 K 2024-11-23T13:21:23,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/6e89716bda2649bd8fd0c5d2b39d9ac9 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/6e89716bda2649bd8fd0c5d2b39d9ac9 2024-11-23T13:21:23,769 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/6e89716bda2649bd8fd0c5d2b39d9ac9, entries=150, sequenceid=499, filesize=12.0 K 2024-11-23T13:21:23,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/49f040bc467d413c9360fe3411983357 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/49f040bc467d413c9360fe3411983357 2024-11-23T13:21:23,775 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/49f040bc467d413c9360fe3411983357, entries=150, sequenceid=499, filesize=12.0 K 2024-11-23T13:21:23,776 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 519df349e6147d27e7c8246089c4409f in 866ms, sequenceid=499, compaction requested=true 2024-11-23T13:21:23,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2538): Flush status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:23,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:23,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=25 2024-11-23T13:21:23,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4106): Remote procedure done, pid=25 2024-11-23T13:21:23,781 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=25, resume processing ppid=24 2024-11-23T13:21:23,781 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7880 sec 2024-11-23T13:21:23,783 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees in 1.7940 sec 2024-11-23T13:21:23,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 519df349e6147d27e7c8246089c4409f 2024-11-23T13:21:23,827 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 519df349e6147d27e7c8246089c4409f 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-23T13:21:23,827 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=A 2024-11-23T13:21:23,828 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:23,828 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=B 2024-11-23T13:21:23,828 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:23,828 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
519df349e6147d27e7c8246089c4409f, store=C 2024-11-23T13:21:23,828 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:23,832 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/8ec3513390be49b8bb2a35b68d2ebf73 is 50, key is test_row_0/A:col10/1732368083209/Put/seqid=0 2024-11-23T13:21:23,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741944_1120 (size=14741) 2024-11-23T13:21:23,849 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:23,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 346 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368143848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:23,951 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:23,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 348 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368143950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:24,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-23T13:21:24,094 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 24 completed 2024-11-23T13:21:24,096 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T13:21:24,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees 2024-11-23T13:21:24,098 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T13:21:24,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-23T13:21:24,098 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T13:21:24,098 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=27, ppid=26, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T13:21:24,154 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:24,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 350 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368144153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:24,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-23T13:21:24,238 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=524 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/8ec3513390be49b8bb2a35b68d2ebf73 2024-11-23T13:21:24,248 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/ac31712ee5f249a9bbdf1c881cc22a49 is 50, key is test_row_0/B:col10/1732368083209/Put/seqid=0 2024-11-23T13:21:24,250 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:24,250 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-23T13:21:24,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:24,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. as already flushing 2024-11-23T13:21:24,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:24,251 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:24,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:24,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:24,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741945_1121 (size=12301) 2024-11-23T13:21:24,256 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=524 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/ac31712ee5f249a9bbdf1c881cc22a49 2024-11-23T13:21:24,272 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/40c8bfcf79274dc0bd9924bfdf7b47f3 is 50, key is test_row_0/C:col10/1732368083209/Put/seqid=0 2024-11-23T13:21:24,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741946_1122 (size=12301) 2024-11-23T13:21:24,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-23T13:21:24,404 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:24,404 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-23T13:21:24,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 
2024-11-23T13:21:24,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. as already flushing 2024-11-23T13:21:24,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:24,405 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:24,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:21:24,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:24,458 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:24,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 352 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368144458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:24,557 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:24,557 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-23T13:21:24,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:24,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. as already flushing 2024-11-23T13:21:24,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:24,558 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:21:24,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:24,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:24,678 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=524 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/40c8bfcf79274dc0bd9924bfdf7b47f3 2024-11-23T13:21:24,684 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/8ec3513390be49b8bb2a35b68d2ebf73 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/8ec3513390be49b8bb2a35b68d2ebf73 2024-11-23T13:21:24,689 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/8ec3513390be49b8bb2a35b68d2ebf73, entries=200, sequenceid=524, filesize=14.4 K 2024-11-23T13:21:24,690 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/ac31712ee5f249a9bbdf1c881cc22a49 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/ac31712ee5f249a9bbdf1c881cc22a49 2024-11-23T13:21:24,695 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/ac31712ee5f249a9bbdf1c881cc22a49, entries=150, sequenceid=524, filesize=12.0 K 2024-11-23T13:21:24,697 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/40c8bfcf79274dc0bd9924bfdf7b47f3 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/40c8bfcf79274dc0bd9924bfdf7b47f3 2024-11-23T13:21:24,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-23T13:21:24,704 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/40c8bfcf79274dc0bd9924bfdf7b47f3, entries=150, sequenceid=524, filesize=12.0 K 2024-11-23T13:21:24,705 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 519df349e6147d27e7c8246089c4409f in 878ms, sequenceid=524, compaction requested=true 2024-11-23T13:21:24,705 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:24,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 519df349e6147d27e7c8246089c4409f:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T13:21:24,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:24,705 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T13:21:24,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 519df349e6147d27e7c8246089c4409f:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T13:21:24,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:21:24,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 519df349e6147d27e7c8246089c4409f:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T13:21:24,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-23T13:21:24,706 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T13:21:24,707 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 55174 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T13:21:24,707 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50294 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T13:21:24,707 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 519df349e6147d27e7c8246089c4409f/B is initiating minor compaction (all files) 2024-11-23T13:21:24,707 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): 519df349e6147d27e7c8246089c4409f/A is initiating minor compaction (all files) 2024-11-23T13:21:24,707 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 519df349e6147d27e7c8246089c4409f/B in TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:24,707 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 519df349e6147d27e7c8246089c4409f/A in TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 
2024-11-23T13:21:24,707 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/0a72a36c3e424fb49a7b3c212654fe8c, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/b969eec4eb4b4475bceb74052af54764, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/6e89716bda2649bd8fd0c5d2b39d9ac9, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/ac31712ee5f249a9bbdf1c881cc22a49] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp, totalSize=49.1 K 2024-11-23T13:21:24,707 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/e53dcc542b894b9495f8f9f951170274, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/06283acd2e4e4bae81408ece19e5fc69, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/6ed7a4aa0bac4fce9ff12dbfaeab8785, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/8ec3513390be49b8bb2a35b68d2ebf73] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp, totalSize=53.9 K 2024-11-23T13:21:24,708 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting e53dcc542b894b9495f8f9f951170274, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=460, earliestPutTs=1732368080736 2024-11-23T13:21:24,708 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 0a72a36c3e424fb49a7b3c212654fe8c, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=460, earliestPutTs=1732368080736 2024-11-23T13:21:24,708 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting b969eec4eb4b4475bceb74052af54764, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=487, earliestPutTs=1732368081402 2024-11-23T13:21:24,708 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 06283acd2e4e4bae81408ece19e5fc69, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=487, earliestPutTs=1732368081402 2024-11-23T13:21:24,709 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 6e89716bda2649bd8fd0c5d2b39d9ac9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=499, earliestPutTs=1732368082036 2024-11-23T13:21:24,710 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 
ba2e440802a7,33173,1732368061317 2024-11-23T13:21:24,711 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-23T13:21:24,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:24,711 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2837): Flushing 519df349e6147d27e7c8246089c4409f 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-23T13:21:24,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=A 2024-11-23T13:21:24,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:24,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=B 2024-11-23T13:21:24,709 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6ed7a4aa0bac4fce9ff12dbfaeab8785, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=499, earliestPutTs=1732368082036 2024-11-23T13:21:24,709 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting ac31712ee5f249a9bbdf1c881cc22a49, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=524, earliestPutTs=1732368083206 2024-11-23T13:21:24,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:24,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=C 2024-11-23T13:21:24,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:24,713 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8ec3513390be49b8bb2a35b68d2ebf73, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=524, earliestPutTs=1732368083206 2024-11-23T13:21:24,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/cd7970f38a40452d96f952168a8e74d1 is 50, key is test_row_0/A:col10/1732368083846/Put/seqid=0 2024-11-23T13:21:24,734 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 519df349e6147d27e7c8246089c4409f#B#compaction#109 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:24,735 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/669ba17311624c4084772f709e21a41e is 50, key is test_row_0/B:col10/1732368083209/Put/seqid=0 2024-11-23T13:21:24,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741947_1123 (size=12301) 2024-11-23T13:21:24,739 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=535 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/cd7970f38a40452d96f952168a8e74d1 2024-11-23T13:21:24,742 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 519df349e6147d27e7c8246089c4409f#A#compaction#110 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:24,743 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/a9d2b6801e524e29a57c571aa15a90b2 is 50, key is test_row_0/A:col10/1732368083209/Put/seqid=0 2024-11-23T13:21:24,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741948_1124 (size=13527) 2024-11-23T13:21:24,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/5840ccf2cfb54fa6b6d34348f6f95192 is 50, key is test_row_0/B:col10/1732368083846/Put/seqid=0 2024-11-23T13:21:24,769 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/669ba17311624c4084772f709e21a41e as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/669ba17311624c4084772f709e21a41e 2024-11-23T13:21:24,777 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 519df349e6147d27e7c8246089c4409f/B of 519df349e6147d27e7c8246089c4409f into 669ba17311624c4084772f709e21a41e(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T13:21:24,777 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:24,777 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f., storeName=519df349e6147d27e7c8246089c4409f/B, priority=12, startTime=1732368084705; duration=0sec 2024-11-23T13:21:24,777 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:21:24,777 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 519df349e6147d27e7c8246089c4409f:B 2024-11-23T13:21:24,777 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T13:21:24,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741949_1125 (size=13527) 2024-11-23T13:21:24,780 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50294 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T13:21:24,780 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 519df349e6147d27e7c8246089c4409f/C is initiating minor compaction (all files) 2024-11-23T13:21:24,780 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 519df349e6147d27e7c8246089c4409f/C in TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 
2024-11-23T13:21:24,781 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/cd9b2dbef82c4df897fd39f7a0b1cdd8, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/225fa415b96f4029a3ec7e3fce7713e5, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/49f040bc467d413c9360fe3411983357, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/40c8bfcf79274dc0bd9924bfdf7b47f3] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp, totalSize=49.1 K 2024-11-23T13:21:24,781 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting cd9b2dbef82c4df897fd39f7a0b1cdd8, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=460, earliestPutTs=1732368080736 2024-11-23T13:21:24,782 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 225fa415b96f4029a3ec7e3fce7713e5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=487, earliestPutTs=1732368081402 2024-11-23T13:21:24,785 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 49f040bc467d413c9360fe3411983357, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=499, earliestPutTs=1732368082036 2024-11-23T13:21:24,786 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 40c8bfcf79274dc0bd9924bfdf7b47f3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=524, earliestPutTs=1732368083206 2024-11-23T13:21:24,792 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/a9d2b6801e524e29a57c571aa15a90b2 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/a9d2b6801e524e29a57c571aa15a90b2 2024-11-23T13:21:24,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741950_1126 (size=12301) 2024-11-23T13:21:24,804 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=535 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/5840ccf2cfb54fa6b6d34348f6f95192 2024-11-23T13:21:24,807 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 519df349e6147d27e7c8246089c4409f/A of 519df349e6147d27e7c8246089c4409f into a9d2b6801e524e29a57c571aa15a90b2(size=13.2 K), total size for store is 13.2 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:21:24,807 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:24,807 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f., storeName=519df349e6147d27e7c8246089c4409f/A, priority=12, startTime=1732368084705; duration=0sec 2024-11-23T13:21:24,807 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:24,807 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 519df349e6147d27e7c8246089c4409f:A 2024-11-23T13:21:24,810 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 519df349e6147d27e7c8246089c4409f#C#compaction#112 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:24,811 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/d453ccf972e14a74a41a0977e7f87706 is 50, key is test_row_0/C:col10/1732368083209/Put/seqid=0 2024-11-23T13:21:24,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/2535f7e990dc4f2789b792fb8fad01f4 is 50, key is test_row_0/C:col10/1732368083846/Put/seqid=0 2024-11-23T13:21:24,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741951_1127 (size=13527) 2024-11-23T13:21:24,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741952_1128 (size=12301) 2024-11-23T13:21:24,830 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=535 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/2535f7e990dc4f2789b792fb8fad01f4 2024-11-23T13:21:24,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/cd7970f38a40452d96f952168a8e74d1 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/cd7970f38a40452d96f952168a8e74d1 2024-11-23T13:21:24,843 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/cd7970f38a40452d96f952168a8e74d1, entries=150, sequenceid=535, filesize=12.0 K 2024-11-23T13:21:24,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/5840ccf2cfb54fa6b6d34348f6f95192 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/5840ccf2cfb54fa6b6d34348f6f95192 2024-11-23T13:21:24,854 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/5840ccf2cfb54fa6b6d34348f6f95192, entries=150, sequenceid=535, filesize=12.0 K 2024-11-23T13:21:24,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/2535f7e990dc4f2789b792fb8fad01f4 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/2535f7e990dc4f2789b792fb8fad01f4 2024-11-23T13:21:24,861 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/2535f7e990dc4f2789b792fb8fad01f4, entries=150, sequenceid=535, filesize=12.0 K 2024-11-23T13:21:24,863 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=0 B/0 for 519df349e6147d27e7c8246089c4409f in 152ms, sequenceid=535, compaction requested=false 2024-11-23T13:21:24,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2538): Flush status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:24,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 
2024-11-23T13:21:24,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=27 2024-11-23T13:21:24,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4106): Remote procedure done, pid=27 2024-11-23T13:21:24,866 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=27, resume processing ppid=26 2024-11-23T13:21:24,866 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, ppid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 767 msec 2024-11-23T13:21:24,868 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees in 771 msec 2024-11-23T13:21:24,978 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 519df349e6147d27e7c8246089c4409f 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-23T13:21:24,979 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=A 2024-11-23T13:21:24,979 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:24,979 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=B 2024-11-23T13:21:24,979 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:24,979 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=C 2024-11-23T13:21:24,979 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:24,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 519df349e6147d27e7c8246089c4409f 2024-11-23T13:21:24,984 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/c8c30762a2ed4eef9d6c639d87d0a96f is 50, key is test_row_0/A:col10/1732368084975/Put/seqid=0 2024-11-23T13:21:24,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741953_1129 (size=12301) 2024-11-23T13:21:24,992 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=548 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/c8c30762a2ed4eef9d6c639d87d0a96f 2024-11-23T13:21:25,001 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/43a1eb8560244279933e737fa12bbcb8 is 50, key is test_row_0/B:col10/1732368084975/Put/seqid=0 2024-11-23T13:21:25,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741954_1130 
(size=12301) 2024-11-23T13:21:25,020 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=548 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/43a1eb8560244279933e737fa12bbcb8 2024-11-23T13:21:25,030 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/d092619b903940c3a76da02ba91bf354 is 50, key is test_row_0/C:col10/1732368084975/Put/seqid=0 2024-11-23T13:21:25,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741955_1131 (size=12301) 2024-11-23T13:21:25,037 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=548 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/d092619b903940c3a76da02ba91bf354 2024-11-23T13:21:25,043 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/c8c30762a2ed4eef9d6c639d87d0a96f as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/c8c30762a2ed4eef9d6c639d87d0a96f 2024-11-23T13:21:25,048 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/c8c30762a2ed4eef9d6c639d87d0a96f, entries=150, sequenceid=548, filesize=12.0 K 2024-11-23T13:21:25,050 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/43a1eb8560244279933e737fa12bbcb8 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/43a1eb8560244279933e737fa12bbcb8 2024-11-23T13:21:25,055 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/43a1eb8560244279933e737fa12bbcb8, entries=150, sequenceid=548, filesize=12.0 K 2024-11-23T13:21:25,057 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/d092619b903940c3a76da02ba91bf354 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/d092619b903940c3a76da02ba91bf354 2024-11-23T13:21:25,062 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/d092619b903940c3a76da02ba91bf354, entries=150, sequenceid=548, filesize=12.0 K 2024-11-23T13:21:25,063 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 519df349e6147d27e7c8246089c4409f in 85ms, sequenceid=548, compaction requested=true 2024-11-23T13:21:25,064 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:25,064 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 519df349e6147d27e7c8246089c4409f:A, priority=-2147483648, current under compaction store size is 2 2024-11-23T13:21:25,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 519df349e6147d27e7c8246089c4409f 2024-11-23T13:21:25,064 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:25,064 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:21:25,064 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 519df349e6147d27e7c8246089c4409f:B, priority=-2147483648, current under compaction store size is 3 2024-11-23T13:21:25,064 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:21:25,064 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 519df349e6147d27e7c8246089c4409f:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T13:21:25,064 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-23T13:21:25,065 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 519df349e6147d27e7c8246089c4409f 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-23T13:21:25,065 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38129 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:21:25,065 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=A 2024-11-23T13:21:25,065 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): 519df349e6147d27e7c8246089c4409f/A is initiating minor compaction (all files) 2024-11-23T13:21:25,065 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 519df349e6147d27e7c8246089c4409f/A in TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 
2024-11-23T13:21:25,065 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:25,065 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=B 2024-11-23T13:21:25,065 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:25,065 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=C 2024-11-23T13:21:25,065 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/a9d2b6801e524e29a57c571aa15a90b2, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/cd7970f38a40452d96f952168a8e74d1, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/c8c30762a2ed4eef9d6c639d87d0a96f] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp, totalSize=37.2 K 2024-11-23T13:21:25,065 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:25,066 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting a9d2b6801e524e29a57c571aa15a90b2, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=524, earliestPutTs=1732368083206 2024-11-23T13:21:25,067 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting cd7970f38a40452d96f952168a8e74d1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=535, earliestPutTs=1732368083832 2024-11-23T13:21:25,067 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting c8c30762a2ed4eef9d6c639d87d0a96f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=548, earliestPutTs=1732368084967 2024-11-23T13:21:25,080 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 519df349e6147d27e7c8246089c4409f#A#compaction#117 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:25,080 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/5dd1529fb1be4b4093d3f0bf90c615e3 is 50, key is test_row_0/A:col10/1732368085050/Put/seqid=0 2024-11-23T13:21:25,081 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/d062926c6ce84e13b3636fc7ebfa7c36 is 50, key is test_row_0/A:col10/1732368084975/Put/seqid=0 2024-11-23T13:21:25,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741956_1132 (size=17181) 2024-11-23T13:21:25,091 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:25,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741957_1133 (size=13629) 2024-11-23T13:21:25,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 392 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368145090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:25,195 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:25,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 394 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368145193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:25,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-23T13:21:25,202 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 26 completed 2024-11-23T13:21:25,203 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T13:21:25,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees 2024-11-23T13:21:25,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-23T13:21:25,205 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T13:21:25,205 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T13:21:25,205 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T13:21:25,232 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/d453ccf972e14a74a41a0977e7f87706 as 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/d453ccf972e14a74a41a0977e7f87706 2024-11-23T13:21:25,238 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 519df349e6147d27e7c8246089c4409f/C of 519df349e6147d27e7c8246089c4409f into d453ccf972e14a74a41a0977e7f87706(size=13.2 K), total size for store is 37.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:21:25,238 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:25,238 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f., storeName=519df349e6147d27e7c8246089c4409f/C, priority=12, startTime=1732368084705; duration=0sec 2024-11-23T13:21:25,239 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-23T13:21:25,239 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 519df349e6147d27e7c8246089c4409f:C 2024-11-23T13:21:25,239 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 519df349e6147d27e7c8246089c4409f:C 2024-11-23T13:21:25,239 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:21:25,240 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38129 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:21:25,240 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 519df349e6147d27e7c8246089c4409f/B is initiating minor compaction (all files) 2024-11-23T13:21:25,240 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 519df349e6147d27e7c8246089c4409f/B in TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 
2024-11-23T13:21:25,240 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/669ba17311624c4084772f709e21a41e, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/5840ccf2cfb54fa6b6d34348f6f95192, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/43a1eb8560244279933e737fa12bbcb8] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp, totalSize=37.2 K 2024-11-23T13:21:25,241 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 669ba17311624c4084772f709e21a41e, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=524, earliestPutTs=1732368083206 2024-11-23T13:21:25,241 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 5840ccf2cfb54fa6b6d34348f6f95192, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=535, earliestPutTs=1732368083832 2024-11-23T13:21:25,242 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 43a1eb8560244279933e737fa12bbcb8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=548, earliestPutTs=1732368084967 2024-11-23T13:21:25,250 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 519df349e6147d27e7c8246089c4409f#B#compaction#119 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:25,250 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/361c6dce4f1f41e794bf7bf36435d340 is 50, key is test_row_0/B:col10/1732368084975/Put/seqid=0 2024-11-23T13:21:25,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741958_1134 (size=13629) 2024-11-23T13:21:25,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-23T13:21:25,357 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:25,358 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-23T13:21:25,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 
2024-11-23T13:21:25,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. as already flushing 2024-11-23T13:21:25,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:25,358 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:25,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:21:25,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:25,397 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:25,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 396 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368145397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:25,490 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=574 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/5dd1529fb1be4b4093d3f0bf90c615e3 2024-11-23T13:21:25,499 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/d062926c6ce84e13b3636fc7ebfa7c36 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/d062926c6ce84e13b3636fc7ebfa7c36 2024-11-23T13:21:25,502 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/3e330483d8be4a6195afabf454734097 is 50, key is test_row_0/B:col10/1732368085050/Put/seqid=0 2024-11-23T13:21:25,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-23T13:21:25,508 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 519df349e6147d27e7c8246089c4409f/A of 519df349e6147d27e7c8246089c4409f into d062926c6ce84e13b3636fc7ebfa7c36(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T13:21:25,508 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:25,508 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f., storeName=519df349e6147d27e7c8246089c4409f/A, priority=13, startTime=1732368085064; duration=0sec 2024-11-23T13:21:25,508 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:21:25,508 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 519df349e6147d27e7c8246089c4409f:A 2024-11-23T13:21:25,509 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:21:25,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741959_1135 (size=12301) 2024-11-23T13:21:25,510 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=574 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/3e330483d8be4a6195afabf454734097 2024-11-23T13:21:25,511 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:25,511 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-23T13:21:25,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:25,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. as already flushing 2024-11-23T13:21:25,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 
2024-11-23T13:21:25,511 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38129 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:21:25,512 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): 519df349e6147d27e7c8246089c4409f/C is initiating minor compaction (all files) 2024-11-23T13:21:25,511 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:25,512 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 519df349e6147d27e7c8246089c4409f/C in TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:25,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:21:25,512 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/d453ccf972e14a74a41a0977e7f87706, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/2535f7e990dc4f2789b792fb8fad01f4, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/d092619b903940c3a76da02ba91bf354] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp, totalSize=37.2 K 2024-11-23T13:21:25,512 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting d453ccf972e14a74a41a0977e7f87706, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=524, earliestPutTs=1732368083206 2024-11-23T13:21:25,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:25,513 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2535f7e990dc4f2789b792fb8fad01f4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=535, earliestPutTs=1732368083832 2024-11-23T13:21:25,513 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting d092619b903940c3a76da02ba91bf354, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=548, earliestPutTs=1732368084967 2024-11-23T13:21:25,519 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/bc9b20e94c9c4b458a8ebe30dbcb54af is 50, key is test_row_0/C:col10/1732368085050/Put/seqid=0 2024-11-23T13:21:25,522 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 519df349e6147d27e7c8246089c4409f#C#compaction#122 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:25,523 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/0a4b4f015c6741e980bd41b081e626f2 is 50, key is test_row_0/C:col10/1732368084975/Put/seqid=0 2024-11-23T13:21:25,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741960_1136 (size=12301) 2024-11-23T13:21:25,524 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=574 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/bc9b20e94c9c4b458a8ebe30dbcb54af 2024-11-23T13:21:25,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741961_1137 (size=13629) 2024-11-23T13:21:25,530 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/5dd1529fb1be4b4093d3f0bf90c615e3 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/5dd1529fb1be4b4093d3f0bf90c615e3 2024-11-23T13:21:25,535 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/5dd1529fb1be4b4093d3f0bf90c615e3, entries=250, sequenceid=574, filesize=16.8 K 2024-11-23T13:21:25,536 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/3e330483d8be4a6195afabf454734097 as 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/3e330483d8be4a6195afabf454734097 2024-11-23T13:21:25,541 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/3e330483d8be4a6195afabf454734097, entries=150, sequenceid=574, filesize=12.0 K 2024-11-23T13:21:25,542 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/bc9b20e94c9c4b458a8ebe30dbcb54af as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/bc9b20e94c9c4b458a8ebe30dbcb54af 2024-11-23T13:21:25,550 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/bc9b20e94c9c4b458a8ebe30dbcb54af, entries=150, sequenceid=574, filesize=12.0 K 2024-11-23T13:21:25,551 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 519df349e6147d27e7c8246089c4409f in 486ms, sequenceid=574, compaction requested=false 2024-11-23T13:21:25,551 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:25,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 519df349e6147d27e7c8246089c4409f 2024-11-23T13:21:25,591 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 519df349e6147d27e7c8246089c4409f 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-23T13:21:25,591 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=A 2024-11-23T13:21:25,591 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:25,591 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=B 2024-11-23T13:21:25,591 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:25,591 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=C 2024-11-23T13:21:25,591 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:25,596 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/0505fa4641fa4582b0f0c5335703d228 is 50, key is test_row_0/A:col10/1732368085588/Put/seqid=0 2024-11-23T13:21:25,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741962_1138 (size=14741) 2024-11-23T13:21:25,623 WARN 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:25,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1732368145621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:25,626 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:25,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52702 deadline: 1732368145623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:25,626 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:25,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52652 deadline: 1732368145623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:25,627 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:25,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52684 deadline: 1732368145623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:25,663 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/361c6dce4f1f41e794bf7bf36435d340 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/361c6dce4f1f41e794bf7bf36435d340 2024-11-23T13:21:25,665 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:25,665 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-23T13:21:25,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:25,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. as already flushing 2024-11-23T13:21:25,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:25,666 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:25,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:25,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:25,669 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 519df349e6147d27e7c8246089c4409f/B of 519df349e6147d27e7c8246089c4409f into 361c6dce4f1f41e794bf7bf36435d340(size=13.3 K), total size for store is 25.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:21:25,670 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:25,670 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f., storeName=519df349e6147d27e7c8246089c4409f/B, priority=13, startTime=1732368085064; duration=0sec 2024-11-23T13:21:25,670 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:25,670 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 519df349e6147d27e7c8246089c4409f:B 2024-11-23T13:21:25,700 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:25,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 398 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368145700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:25,728 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:25,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1732368145725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:25,729 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:25,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52702 deadline: 1732368145727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:25,730 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:25,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52652 deadline: 1732368145727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:25,730 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:25,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52684 deadline: 1732368145728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:25,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-23T13:21:25,818 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:25,819 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-23T13:21:25,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:25,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. as already flushing 2024-11-23T13:21:25,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:25,819 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:21:25,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:25,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:25,934 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:25,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1732368145931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:25,934 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:25,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52684 deadline: 1732368145932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:25,935 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:25,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52652 deadline: 1732368145932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:25,935 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:25,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52702 deadline: 1732368145932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:25,939 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/0a4b4f015c6741e980bd41b081e626f2 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/0a4b4f015c6741e980bd41b081e626f2 2024-11-23T13:21:25,945 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 519df349e6147d27e7c8246089c4409f/C of 519df349e6147d27e7c8246089c4409f into 0a4b4f015c6741e980bd41b081e626f2(size=13.3 K), total size for store is 25.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T13:21:25,945 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:25,945 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f., storeName=519df349e6147d27e7c8246089c4409f/C, priority=13, startTime=1732368085064; duration=0sec 2024-11-23T13:21:25,945 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:25,945 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 519df349e6147d27e7c8246089c4409f:C 2024-11-23T13:21:25,972 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:25,972 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-23T13:21:25,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:25,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. as already flushing 2024-11-23T13:21:25,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:25,973 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:21:25,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:25,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:26,001 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=587 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/0505fa4641fa4582b0f0c5335703d228 2024-11-23T13:21:26,011 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/a29bfeae3fa741c3838cd361a32c41d6 is 50, key is test_row_0/B:col10/1732368085588/Put/seqid=0 2024-11-23T13:21:26,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741963_1139 (size=12301) 2024-11-23T13:21:26,018 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=587 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/a29bfeae3fa741c3838cd361a32c41d6 2024-11-23T13:21:26,027 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/df9825a4f5ce4b68bf5688676524c0c2 is 50, key is test_row_0/C:col10/1732368085588/Put/seqid=0 2024-11-23T13:21:26,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741964_1140 (size=12301) 2024-11-23T13:21:26,039 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=587 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/df9825a4f5ce4b68bf5688676524c0c2 2024-11-23T13:21:26,047 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/0505fa4641fa4582b0f0c5335703d228 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/0505fa4641fa4582b0f0c5335703d228 2024-11-23T13:21:26,055 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/0505fa4641fa4582b0f0c5335703d228, entries=200, sequenceid=587, filesize=14.4 K 2024-11-23T13:21:26,056 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/a29bfeae3fa741c3838cd361a32c41d6 as 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/a29bfeae3fa741c3838cd361a32c41d6 2024-11-23T13:21:26,062 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/a29bfeae3fa741c3838cd361a32c41d6, entries=150, sequenceid=587, filesize=12.0 K 2024-11-23T13:21:26,064 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/df9825a4f5ce4b68bf5688676524c0c2 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/df9825a4f5ce4b68bf5688676524c0c2 2024-11-23T13:21:26,071 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/df9825a4f5ce4b68bf5688676524c0c2, entries=150, sequenceid=587, filesize=12.0 K 2024-11-23T13:21:26,072 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 519df349e6147d27e7c8246089c4409f in 481ms, sequenceid=587, compaction requested=true 2024-11-23T13:21:26,072 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:26,072 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:21:26,072 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 519df349e6147d27e7c8246089c4409f:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T13:21:26,072 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:26,073 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 519df349e6147d27e7c8246089c4409f:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T13:21:26,073 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:26,073 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:21:26,073 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 519df349e6147d27e7c8246089c4409f:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T13:21:26,073 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:21:26,074 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction 
algorithm has selected 3 files of size 45551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:21:26,074 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): 519df349e6147d27e7c8246089c4409f/A is initiating minor compaction (all files) 2024-11-23T13:21:26,074 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 519df349e6147d27e7c8246089c4409f/A in TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:26,074 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/d062926c6ce84e13b3636fc7ebfa7c36, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/5dd1529fb1be4b4093d3f0bf90c615e3, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/0505fa4641fa4582b0f0c5335703d228] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp, totalSize=44.5 K 2024-11-23T13:21:26,075 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38231 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:21:26,075 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 519df349e6147d27e7c8246089c4409f/B is initiating minor compaction (all files) 2024-11-23T13:21:26,075 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 519df349e6147d27e7c8246089c4409f/B in TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 
2024-11-23T13:21:26,075 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/361c6dce4f1f41e794bf7bf36435d340, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/3e330483d8be4a6195afabf454734097, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/a29bfeae3fa741c3838cd361a32c41d6] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp, totalSize=37.3 K 2024-11-23T13:21:26,075 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting d062926c6ce84e13b3636fc7ebfa7c36, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=548, earliestPutTs=1732368084967 2024-11-23T13:21:26,076 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 361c6dce4f1f41e794bf7bf36435d340, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=548, earliestPutTs=1732368084967 2024-11-23T13:21:26,076 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5dd1529fb1be4b4093d3f0bf90c615e3, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=574, earliestPutTs=1732368085044 2024-11-23T13:21:26,076 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 3e330483d8be4a6195afabf454734097, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=574, earliestPutTs=1732368085044 2024-11-23T13:21:26,077 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting a29bfeae3fa741c3838cd361a32c41d6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=587, earliestPutTs=1732368085066 2024-11-23T13:21:26,077 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0505fa4641fa4582b0f0c5335703d228, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=587, earliestPutTs=1732368085066 2024-11-23T13:21:26,090 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 519df349e6147d27e7c8246089c4409f#B#compaction#126 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:26,091 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/b04bf9d667404e20962f1c91eecb87f4 is 50, key is test_row_0/B:col10/1732368085588/Put/seqid=0 2024-11-23T13:21:26,103 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 519df349e6147d27e7c8246089c4409f#A#compaction#127 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:26,104 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/1f99e1796f5143e2ae693c9d25cc7331 is 50, key is test_row_0/A:col10/1732368085588/Put/seqid=0 2024-11-23T13:21:26,126 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:26,127 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-23T13:21:26,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:26,127 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2837): Flushing 519df349e6147d27e7c8246089c4409f 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-23T13:21:26,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=A 2024-11-23T13:21:26,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:26,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=B 2024-11-23T13:21:26,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:26,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=C 2024-11-23T13:21:26,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:26,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741965_1141 (size=13731) 2024-11-23T13:21:26,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741966_1142 (size=13731) 2024-11-23T13:21:26,142 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/b04bf9d667404e20962f1c91eecb87f4 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/b04bf9d667404e20962f1c91eecb87f4 2024-11-23T13:21:26,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 
{event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/b73d841a62474f7a9dc1763fdc973f8b is 50, key is test_row_0/A:col10/1732368085620/Put/seqid=0 2024-11-23T13:21:26,148 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/1f99e1796f5143e2ae693c9d25cc7331 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/1f99e1796f5143e2ae693c9d25cc7331 2024-11-23T13:21:26,153 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 519df349e6147d27e7c8246089c4409f/B of 519df349e6147d27e7c8246089c4409f into b04bf9d667404e20962f1c91eecb87f4(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:21:26,153 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:26,153 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f., storeName=519df349e6147d27e7c8246089c4409f/B, priority=13, startTime=1732368086072; duration=0sec 2024-11-23T13:21:26,153 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:21:26,153 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 519df349e6147d27e7c8246089c4409f:B 2024-11-23T13:21:26,153 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:21:26,155 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38231 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:21:26,155 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 519df349e6147d27e7c8246089c4409f/C is initiating minor compaction (all files) 2024-11-23T13:21:26,155 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 519df349e6147d27e7c8246089c4409f/C in TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 
2024-11-23T13:21:26,155 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/0a4b4f015c6741e980bd41b081e626f2, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/bc9b20e94c9c4b458a8ebe30dbcb54af, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/df9825a4f5ce4b68bf5688676524c0c2] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp, totalSize=37.3 K 2024-11-23T13:21:26,156 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 0a4b4f015c6741e980bd41b081e626f2, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=548, earliestPutTs=1732368084967 2024-11-23T13:21:26,157 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting bc9b20e94c9c4b458a8ebe30dbcb54af, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=574, earliestPutTs=1732368085044 2024-11-23T13:21:26,157 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting df9825a4f5ce4b68bf5688676524c0c2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=587, earliestPutTs=1732368085066 2024-11-23T13:21:26,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741967_1143 (size=12301) 2024-11-23T13:21:26,160 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 519df349e6147d27e7c8246089c4409f/A of 519df349e6147d27e7c8246089c4409f into 1f99e1796f5143e2ae693c9d25cc7331(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T13:21:26,160 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:26,160 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f., storeName=519df349e6147d27e7c8246089c4409f/A, priority=13, startTime=1732368086072; duration=0sec 2024-11-23T13:21:26,160 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:26,161 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 519df349e6147d27e7c8246089c4409f:A 2024-11-23T13:21:26,162 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=614 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/b73d841a62474f7a9dc1763fdc973f8b 2024-11-23T13:21:26,174 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 519df349e6147d27e7c8246089c4409f#C#compaction#129 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:26,175 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/1a57cd1b4c81487cb72702a0ab157f26 is 50, key is test_row_0/C:col10/1732368085588/Put/seqid=0 2024-11-23T13:21:26,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/e9f8921919c2462fa1b36315bc4d4550 is 50, key is test_row_0/B:col10/1732368085620/Put/seqid=0 2024-11-23T13:21:26,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741968_1144 (size=13731) 2024-11-23T13:21:26,197 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/1a57cd1b4c81487cb72702a0ab157f26 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/1a57cd1b4c81487cb72702a0ab157f26 2024-11-23T13:21:26,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741969_1145 (size=12301) 2024-11-23T13:21:26,200 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=614 (bloomFilter=true), 
to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/e9f8921919c2462fa1b36315bc4d4550 2024-11-23T13:21:26,206 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 519df349e6147d27e7c8246089c4409f/C of 519df349e6147d27e7c8246089c4409f into 1a57cd1b4c81487cb72702a0ab157f26(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:21:26,206 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:26,206 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f., storeName=519df349e6147d27e7c8246089c4409f/C, priority=13, startTime=1732368086073; duration=0sec 2024-11-23T13:21:26,207 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:26,207 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. as already flushing 2024-11-23T13:21:26,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 519df349e6147d27e7c8246089c4409f 2024-11-23T13:21:26,207 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 519df349e6147d27e7c8246089c4409f:C 2024-11-23T13:21:26,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/e709cdb5f3d3470e95f31fd2de36811c is 50, key is test_row_0/C:col10/1732368085620/Put/seqid=0 2024-11-23T13:21:26,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741970_1146 (size=12301) 2024-11-23T13:21:26,231 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:26,232 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=614 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/e709cdb5f3d3470e95f31fd2de36811c 2024-11-23T13:21:26,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 408 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368146229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:26,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/b73d841a62474f7a9dc1763fdc973f8b as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/b73d841a62474f7a9dc1763fdc973f8b 2024-11-23T13:21:26,239 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:26,240 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:26,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1732368146236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:26,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52684 deadline: 1732368146237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:26,240 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:26,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52652 deadline: 1732368146237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:26,241 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:26,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52702 deadline: 1732368146239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:26,246 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/b73d841a62474f7a9dc1763fdc973f8b, entries=150, sequenceid=614, filesize=12.0 K 2024-11-23T13:21:26,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/e9f8921919c2462fa1b36315bc4d4550 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/e9f8921919c2462fa1b36315bc4d4550 2024-11-23T13:21:26,255 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/e9f8921919c2462fa1b36315bc4d4550, entries=150, sequenceid=614, filesize=12.0 K 2024-11-23T13:21:26,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/e709cdb5f3d3470e95f31fd2de36811c as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/e709cdb5f3d3470e95f31fd2de36811c 2024-11-23T13:21:26,265 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/e709cdb5f3d3470e95f31fd2de36811c, entries=150, sequenceid=614, filesize=12.0 K 2024-11-23T13:21:26,266 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 519df349e6147d27e7c8246089c4409f in 139ms, sequenceid=614, 
compaction requested=false 2024-11-23T13:21:26,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2538): Flush status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:26,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:26,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=29 2024-11-23T13:21:26,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4106): Remote procedure done, pid=29 2024-11-23T13:21:26,270 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=29, resume processing ppid=28 2024-11-23T13:21:26,270 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=29, ppid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0630 sec 2024-11-23T13:21:26,272 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees in 1.0680 sec 2024-11-23T13:21:26,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-23T13:21:26,308 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 28 completed 2024-11-23T13:21:26,310 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T13:21:26,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees 2024-11-23T13:21:26,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-23T13:21:26,312 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T13:21:26,313 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=30, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T13:21:26,313 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T13:21:26,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 519df349e6147d27e7c8246089c4409f 2024-11-23T13:21:26,336 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 519df349e6147d27e7c8246089c4409f 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-23T13:21:26,336 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
519df349e6147d27e7c8246089c4409f, store=A 2024-11-23T13:21:26,336 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:26,337 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=B 2024-11-23T13:21:26,337 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:26,337 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=C 2024-11-23T13:21:26,337 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:26,343 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/90da84f9f8bf4d9db34a71b6c5f32347 is 50, key is test_row_0/A:col10/1732368086226/Put/seqid=0 2024-11-23T13:21:26,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741971_1147 (size=12301) 2024-11-23T13:21:26,364 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=629 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/90da84f9f8bf4d9db34a71b6c5f32347 2024-11-23T13:21:26,374 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/c782e57f87c94362bf36e9f46e3bdf3b is 50, key is test_row_0/B:col10/1732368086226/Put/seqid=0 2024-11-23T13:21:26,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741972_1148 (size=12301) 2024-11-23T13:21:26,401 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:26,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 432 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368146399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:26,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-23T13:21:26,465 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:26,465 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-23T13:21:26,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:26,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. as already flushing 2024-11-23T13:21:26,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:26,466 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:21:26,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:26,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:26,503 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:26,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 434 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368146502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:26,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-23T13:21:26,618 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:26,618 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-23T13:21:26,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:26,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. as already flushing 2024-11-23T13:21:26,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 
2024-11-23T13:21:26,619 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:26,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:26,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:26,707 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:26,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 436 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368146705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:26,743 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:26,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52684 deadline: 1732368146741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:26,746 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:26,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52702 deadline: 1732368146745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:26,746 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:26,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1732368146745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:26,746 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:26,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52652 deadline: 1732368146746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:26,772 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:26,772 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-23T13:21:26,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:26,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. as already flushing 2024-11-23T13:21:26,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:26,772 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:26,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:26,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:26,791 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=629 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/c782e57f87c94362bf36e9f46e3bdf3b 2024-11-23T13:21:26,799 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/314d6f0a7e354d05805aec6284ef3eeb is 50, key is test_row_0/C:col10/1732368086226/Put/seqid=0 2024-11-23T13:21:26,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741973_1149 (size=12301) 2024-11-23T13:21:26,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-23T13:21:26,924 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:26,924 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-23T13:21:26,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:26,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 
as already flushing 2024-11-23T13:21:26,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:26,925 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:26,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:26,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:27,012 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:27,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 438 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52668 deadline: 1732368147011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:27,077 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:27,078 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-23T13:21:27,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:27,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. as already flushing 2024-11-23T13:21:27,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:27,078 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:21:27,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:27,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:27,206 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=629 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/314d6f0a7e354d05805aec6284ef3eeb 2024-11-23T13:21:27,212 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/90da84f9f8bf4d9db34a71b6c5f32347 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/90da84f9f8bf4d9db34a71b6c5f32347 2024-11-23T13:21:27,217 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/90da84f9f8bf4d9db34a71b6c5f32347, entries=150, sequenceid=629, filesize=12.0 K 2024-11-23T13:21:27,219 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/c782e57f87c94362bf36e9f46e3bdf3b as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/c782e57f87c94362bf36e9f46e3bdf3b 2024-11-23T13:21:27,224 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/c782e57f87c94362bf36e9f46e3bdf3b, entries=150, sequenceid=629, filesize=12.0 K 2024-11-23T13:21:27,225 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/314d6f0a7e354d05805aec6284ef3eeb as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/314d6f0a7e354d05805aec6284ef3eeb 2024-11-23T13:21:27,230 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/314d6f0a7e354d05805aec6284ef3eeb, entries=150, sequenceid=629, filesize=12.0 K 2024-11-23T13:21:27,230 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:27,231 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 519df349e6147d27e7c8246089c4409f in 894ms, sequenceid=629, compaction requested=true 2024-11-23T13:21:27,231 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): 
Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-23T13:21:27,231 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:27,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:27,231 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 519df349e6147d27e7c8246089c4409f:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T13:21:27,231 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:27,231 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:21:27,231 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 519df349e6147d27e7c8246089c4409f:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T13:21:27,231 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2837): Flushing 519df349e6147d27e7c8246089c4409f 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-23T13:21:27,231 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:27,231 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:21:27,231 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 519df349e6147d27e7c8246089c4409f:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T13:21:27,231 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:21:27,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=A 2024-11-23T13:21:27,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:27,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=B 2024-11-23T13:21:27,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:27,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=C 2024-11-23T13:21:27,232 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:27,233 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:21:27,233 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:21:27,233 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): 519df349e6147d27e7c8246089c4409f/A is initiating minor compaction (all files) 2024-11-23T13:21:27,233 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 519df349e6147d27e7c8246089c4409f/B is initiating minor compaction (all files) 2024-11-23T13:21:27,233 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 519df349e6147d27e7c8246089c4409f/A in TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:27,233 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 519df349e6147d27e7c8246089c4409f/B in TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:27,233 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/1f99e1796f5143e2ae693c9d25cc7331, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/b73d841a62474f7a9dc1763fdc973f8b, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/90da84f9f8bf4d9db34a71b6c5f32347] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp, totalSize=37.4 K 2024-11-23T13:21:27,233 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/b04bf9d667404e20962f1c91eecb87f4, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/e9f8921919c2462fa1b36315bc4d4550, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/c782e57f87c94362bf36e9f46e3bdf3b] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp, totalSize=37.4 K 2024-11-23T13:21:27,234 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1f99e1796f5143e2ae693c9d25cc7331, keycount=150, bloomtype=ROW, size=13.4 K, encoding=NONE, compression=NONE, seqNum=587, earliestPutTs=1732368085066 2024-11-23T13:21:27,234 
DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting b04bf9d667404e20962f1c91eecb87f4, keycount=150, bloomtype=ROW, size=13.4 K, encoding=NONE, compression=NONE, seqNum=587, earliestPutTs=1732368085066 2024-11-23T13:21:27,234 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting b73d841a62474f7a9dc1763fdc973f8b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=614, earliestPutTs=1732368085620 2024-11-23T13:21:27,235 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting e9f8921919c2462fa1b36315bc4d4550, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=614, earliestPutTs=1732368085620 2024-11-23T13:21:27,237 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 90da84f9f8bf4d9db34a71b6c5f32347, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=629, earliestPutTs=1732368086226 2024-11-23T13:21:27,237 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting c782e57f87c94362bf36e9f46e3bdf3b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=629, earliestPutTs=1732368086226 2024-11-23T13:21:27,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/c34c4199c13c498c88849e91278243d8 is 50, key is test_row_0/A:col10/1732368086379/Put/seqid=0 2024-11-23T13:21:27,250 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 519df349e6147d27e7c8246089c4409f#B#compaction#137 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:27,250 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 519df349e6147d27e7c8246089c4409f#A#compaction#136 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:27,251 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/16f3a725b1cf44a3b85f5120938d9e8e is 50, key is test_row_0/A:col10/1732368086226/Put/seqid=0 2024-11-23T13:21:27,251 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/24db900acb69447dabd98bca5568271a is 50, key is test_row_0/B:col10/1732368086226/Put/seqid=0 2024-11-23T13:21:27,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741974_1150 (size=12301) 2024-11-23T13:21:27,254 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=653 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/c34c4199c13c498c88849e91278243d8 2024-11-23T13:21:27,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/04c4a2393dbe4adf8694456c95366da3 is 50, key is test_row_0/B:col10/1732368086379/Put/seqid=0 2024-11-23T13:21:27,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741976_1152 (size=13833) 2024-11-23T13:21:27,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741975_1151 (size=13833) 2024-11-23T13:21:27,277 DEBUG [Thread-160 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x295cb1ac to 127.0.0.1:51875 2024-11-23T13:21:27,277 DEBUG [Thread-160 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:21:27,278 DEBUG [Thread-162 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x70267494 to 127.0.0.1:51875 2024-11-23T13:21:27,278 DEBUG [Thread-162 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:21:27,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741977_1153 (size=12301) 2024-11-23T13:21:27,279 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/24db900acb69447dabd98bca5568271a as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/24db900acb69447dabd98bca5568271a 2024-11-23T13:21:27,280 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=653 
(bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/04c4a2393dbe4adf8694456c95366da3 2024-11-23T13:21:27,281 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/16f3a725b1cf44a3b85f5120938d9e8e as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/16f3a725b1cf44a3b85f5120938d9e8e 2024-11-23T13:21:27,281 DEBUG [Thread-164 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1d2a8e08 to 127.0.0.1:51875 2024-11-23T13:21:27,281 DEBUG [Thread-164 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:21:27,283 DEBUG [Thread-166 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2c915d17 to 127.0.0.1:51875 2024-11-23T13:21:27,283 DEBUG [Thread-166 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:21:27,288 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 519df349e6147d27e7c8246089c4409f/B of 519df349e6147d27e7c8246089c4409f into 24db900acb69447dabd98bca5568271a(size=13.5 K), total size for store is 13.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:21:27,288 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:27,288 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f., storeName=519df349e6147d27e7c8246089c4409f/B, priority=13, startTime=1732368087231; duration=0sec 2024-11-23T13:21:27,288 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:21:27,288 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 519df349e6147d27e7c8246089c4409f:B 2024-11-23T13:21:27,288 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:21:27,290 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:21:27,290 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 519df349e6147d27e7c8246089c4409f/C is initiating minor compaction (all files) 2024-11-23T13:21:27,290 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 519df349e6147d27e7c8246089c4409f/C in TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 
2024-11-23T13:21:27,290 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/1a57cd1b4c81487cb72702a0ab157f26, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/e709cdb5f3d3470e95f31fd2de36811c, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/314d6f0a7e354d05805aec6284ef3eeb] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp, totalSize=37.4 K 2024-11-23T13:21:27,290 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 1a57cd1b4c81487cb72702a0ab157f26, keycount=150, bloomtype=ROW, size=13.4 K, encoding=NONE, compression=NONE, seqNum=587, earliestPutTs=1732368085066 2024-11-23T13:21:27,291 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 519df349e6147d27e7c8246089c4409f/A of 519df349e6147d27e7c8246089c4409f into 16f3a725b1cf44a3b85f5120938d9e8e(size=13.5 K), total size for store is 13.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:21:27,291 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:27,291 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f., storeName=519df349e6147d27e7c8246089c4409f/A, priority=13, startTime=1732368087231; duration=0sec 2024-11-23T13:21:27,291 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:27,291 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting e709cdb5f3d3470e95f31fd2de36811c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=614, earliestPutTs=1732368085620 2024-11-23T13:21:27,291 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 519df349e6147d27e7c8246089c4409f:A 2024-11-23T13:21:27,291 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 314d6f0a7e354d05805aec6284ef3eeb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=629, earliestPutTs=1732368086226 2024-11-23T13:21:27,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/5e6bc460539c405787d7c3243371c2b0 is 50, key is test_row_0/C:col10/1732368086379/Put/seqid=0 2024-11-23T13:21:27,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741978_1154 (size=12301) 2024-11-23T13:21:27,298 INFO 
[RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=653 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/5e6bc460539c405787d7c3243371c2b0 2024-11-23T13:21:27,300 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 519df349e6147d27e7c8246089c4409f#C#compaction#140 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:27,301 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/50bc1590dda5480eb522c35cd18394ed is 50, key is test_row_0/C:col10/1732368086226/Put/seqid=0 2024-11-23T13:21:27,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/c34c4199c13c498c88849e91278243d8 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/c34c4199c13c498c88849e91278243d8 2024-11-23T13:21:27,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741979_1155 (size=13833) 2024-11-23T13:21:27,307 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/c34c4199c13c498c88849e91278243d8, entries=150, sequenceid=653, filesize=12.0 K 2024-11-23T13:21:27,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/04c4a2393dbe4adf8694456c95366da3 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/04c4a2393dbe4adf8694456c95366da3 2024-11-23T13:21:27,312 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/04c4a2393dbe4adf8694456c95366da3, entries=150, sequenceid=653, filesize=12.0 K 2024-11-23T13:21:27,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/5e6bc460539c405787d7c3243371c2b0 as 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/5e6bc460539c405787d7c3243371c2b0 2024-11-23T13:21:27,318 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/5e6bc460539c405787d7c3243371c2b0, entries=150, sequenceid=653, filesize=12.0 K 2024-11-23T13:21:27,318 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=0 B/0 for 519df349e6147d27e7c8246089c4409f in 87ms, sequenceid=653, compaction requested=false 2024-11-23T13:21:27,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2538): Flush status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:27,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:27,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=31 2024-11-23T13:21:27,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4106): Remote procedure done, pid=31 2024-11-23T13:21:27,321 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=31, resume processing ppid=30 2024-11-23T13:21:27,321 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, ppid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0070 sec 2024-11-23T13:21:27,323 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees in 1.0120 sec 2024-11-23T13:21:27,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-23T13:21:27,415 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 30 completed 2024-11-23T13:21:27,518 DEBUG [Thread-157 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x38766d64 to 127.0.0.1:51875 2024-11-23T13:21:27,518 DEBUG [Thread-157 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:21:27,712 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/50bc1590dda5480eb522c35cd18394ed as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/50bc1590dda5480eb522c35cd18394ed 2024-11-23T13:21:27,716 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 519df349e6147d27e7c8246089c4409f/C of 519df349e6147d27e7c8246089c4409f 
into 50bc1590dda5480eb522c35cd18394ed(size=13.5 K), total size for store is 25.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:21:27,717 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:27,717 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f., storeName=519df349e6147d27e7c8246089c4409f/C, priority=13, startTime=1732368087231; duration=0sec 2024-11-23T13:21:27,717 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:27,717 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 519df349e6147d27e7c8246089c4409f:C 2024-11-23T13:21:27,748 DEBUG [Thread-149 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6f343a4d to 127.0.0.1:51875 2024-11-23T13:21:27,748 DEBUG [Thread-149 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:21:27,751 DEBUG [Thread-153 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x478bae6b to 127.0.0.1:51875 2024-11-23T13:21:27,751 DEBUG [Thread-153 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:21:27,754 DEBUG [Thread-155 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5400112e to 127.0.0.1:51875 2024-11-23T13:21:27,754 DEBUG [Thread-155 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:21:27,756 DEBUG [Thread-151 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x22cb07dd to 127.0.0.1:51875 2024-11-23T13:21:27,756 DEBUG [Thread-151 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:21:27,757 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-23T13:21:27,757 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 54 2024-11-23T13:21:27,757 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 48 2024-11-23T13:21:27,757 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 54 2024-11-23T13:21:27,757 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 57 2024-11-23T13:21:27,757 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 306 2024-11-23T13:21:27,757 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-23T13:21:27,757 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6793 2024-11-23T13:21:27,757 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6454 2024-11-23T13:21:27,757 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-23T13:21:27,757 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2991 2024-11-23T13:21:27,757 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8970 rows 2024-11-23T13:21:27,757 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2980 2024-11-23T13:21:27,757 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8939 rows 2024-11-23T13:21:27,757 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-23T13:21:27,757 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1f6e36fe to 127.0.0.1:51875 2024-11-23T13:21:27,757 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:21:27,761 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-23T13:21:27,766 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-23T13:21:27,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=32, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-23T13:21:27,774 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732368087774"}]},"ts":"1732368087774"} 2024-11-23T13:21:27,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-23T13:21:27,775 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-23T13:21:27,777 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-23T13:21:27,779 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-23T13:21:27,784 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=34, ppid=33, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=519df349e6147d27e7c8246089c4409f, UNASSIGN}] 2024-11-23T13:21:27,784 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=34, ppid=33, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure 
table=TestAcidGuarantees, region=519df349e6147d27e7c8246089c4409f, UNASSIGN 2024-11-23T13:21:27,785 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=34 updating hbase:meta row=519df349e6147d27e7c8246089c4409f, regionState=CLOSING, regionLocation=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:27,786 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T13:21:27,786 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=35, ppid=34, state=RUNNABLE; CloseRegionProcedure 519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317}] 2024-11-23T13:21:27,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-23T13:21:27,941 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:27,943 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(124): Close 519df349e6147d27e7c8246089c4409f 2024-11-23T13:21:27,943 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-23T13:21:27,943 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1681): Closing 519df349e6147d27e7c8246089c4409f, disabling compactions & flushes 2024-11-23T13:21:27,943 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:27,944 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:27,944 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. after waiting 0 ms 2024-11-23T13:21:27,944 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 
2024-11-23T13:21:27,944 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(2837): Flushing 519df349e6147d27e7c8246089c4409f 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-23T13:21:27,944 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=A 2024-11-23T13:21:27,944 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:27,944 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=B 2024-11-23T13:21:27,944 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:27,944 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 519df349e6147d27e7c8246089c4409f, store=C 2024-11-23T13:21:27,944 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:27,948 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/26e7824157bc4f328331f6ad879759c7 is 50, key is test_row_0/A:col10/1732368087753/Put/seqid=0 2024-11-23T13:21:27,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741980_1156 (size=12301) 2024-11-23T13:21:28,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-23T13:21:28,353 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=664 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/26e7824157bc4f328331f6ad879759c7 2024-11-23T13:21:28,360 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/391173f7f90748cea98bb6f7b4e4a2ca is 50, key is test_row_0/B:col10/1732368087753/Put/seqid=0 2024-11-23T13:21:28,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741981_1157 (size=12301) 2024-11-23T13:21:28,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-23T13:21:28,765 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 
{event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=664 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/391173f7f90748cea98bb6f7b4e4a2ca 2024-11-23T13:21:28,773 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/075d1f6210f749da9619a572d78787c0 is 50, key is test_row_0/C:col10/1732368087753/Put/seqid=0 2024-11-23T13:21:28,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741982_1158 (size=12301) 2024-11-23T13:21:28,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-23T13:21:29,177 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=664 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/075d1f6210f749da9619a572d78787c0 2024-11-23T13:21:29,182 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/A/26e7824157bc4f328331f6ad879759c7 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/26e7824157bc4f328331f6ad879759c7 2024-11-23T13:21:29,187 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/26e7824157bc4f328331f6ad879759c7, entries=150, sequenceid=664, filesize=12.0 K 2024-11-23T13:21:29,187 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/B/391173f7f90748cea98bb6f7b4e4a2ca as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/391173f7f90748cea98bb6f7b4e4a2ca 2024-11-23T13:21:29,192 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/391173f7f90748cea98bb6f7b4e4a2ca, entries=150, sequenceid=664, filesize=12.0 K 2024-11-23T13:21:29,192 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/.tmp/C/075d1f6210f749da9619a572d78787c0 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/075d1f6210f749da9619a572d78787c0 2024-11-23T13:21:29,197 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/075d1f6210f749da9619a572d78787c0, entries=150, sequenceid=664, filesize=12.0 K 2024-11-23T13:21:29,198 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 519df349e6147d27e7c8246089c4409f in 1254ms, sequenceid=664, compaction requested=true 2024-11-23T13:21:29,198 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/bd9798d3a6af44f9b2258a8de63909a5, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/f0228697838f4b528261e823f6707d8a, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/1a9e4aa2578547d0a827c6df99d32698, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/69da27dfc1f041789f1d67ebecb85e2c, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/f788875c844044b5a99e6c603cec027d, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/0aee31dfccc44711a0b0e8c5238b0951, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/91f31174a6e8419abe51341066780093, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/da7786b4c2844541aaac75a865ef3e2a, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/49b8beff60174ec0bdb28955ebdabb50, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/5766feb26dd74f8081520e2ebbb750b8, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/dae7886733c54fbc839e506bfa54b797, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/fdaf06915f854f41bfc28387eebe4575, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/91bcedab50d9418aa521d1e4cf8b79cc, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/0a947b38cb884dec8fc973004f5bf624, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/e8de093a701e4a4b83dd0397f1302d8a, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/fb994febe70444078dc91bd26647cb52, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/fa92163a33a3429d82c1d610a9a39588, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/a298c9d1130c4112976ef3cfd792a52d, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/195ebe2045d04600bd8d2ad7fb86776e, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/0cb4da06a623471fb3be936bcc6d35d3, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/706bfba7433e4161b86d69f575e58016, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/867fcee249c74181912408d650e9ac58, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/0336d22c97144cdeb572b62142285625, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/1653cbfd2e4543b6bfbc23ece82e0847, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/22d550f675ad49899dcd196d09b8048e, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/92b2c48f4f1e4c9cae5eadbb1d296209, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/93fb3ccbf502425a970bba4895e220c9, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/3451f5e8f08a454ab5e7a93bf9aa77f3, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/96268a4b6e654bca9cf778b462b94fe4, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/4fb9a51993f44b6084ccdf182faa25cb, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/0046e7f3d090429ba6fecc266786cb98, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/e53dcc542b894b9495f8f9f951170274, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/c538c76c9e1540f1996c23e7373f2ec2, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/06283acd2e4e4bae81408ece19e5fc69, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/6ed7a4aa0bac4fce9ff12dbfaeab8785, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/8ec3513390be49b8bb2a35b68d2ebf73, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/a9d2b6801e524e29a57c571aa15a90b2, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/cd7970f38a40452d96f952168a8e74d1, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/d062926c6ce84e13b3636fc7ebfa7c36, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/c8c30762a2ed4eef9d6c639d87d0a96f, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/5dd1529fb1be4b4093d3f0bf90c615e3, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/0505fa4641fa4582b0f0c5335703d228, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/1f99e1796f5143e2ae693c9d25cc7331, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/b73d841a62474f7a9dc1763fdc973f8b, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/90da84f9f8bf4d9db34a71b6c5f32347] to archive 2024-11-23T13:21:29,201 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-23T13:21:29,207 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/bd9798d3a6af44f9b2258a8de63909a5 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/bd9798d3a6af44f9b2258a8de63909a5 2024-11-23T13:21:29,209 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/f0228697838f4b528261e823f6707d8a to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/f0228697838f4b528261e823f6707d8a 2024-11-23T13:21:29,210 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/1a9e4aa2578547d0a827c6df99d32698 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/1a9e4aa2578547d0a827c6df99d32698 2024-11-23T13:21:29,211 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/69da27dfc1f041789f1d67ebecb85e2c to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/69da27dfc1f041789f1d67ebecb85e2c 2024-11-23T13:21:29,212 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/f788875c844044b5a99e6c603cec027d to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/f788875c844044b5a99e6c603cec027d 2024-11-23T13:21:29,214 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/0aee31dfccc44711a0b0e8c5238b0951 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/0aee31dfccc44711a0b0e8c5238b0951 2024-11-23T13:21:29,215 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/91f31174a6e8419abe51341066780093 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/91f31174a6e8419abe51341066780093 2024-11-23T13:21:29,216 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/da7786b4c2844541aaac75a865ef3e2a to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/da7786b4c2844541aaac75a865ef3e2a 2024-11-23T13:21:29,217 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/49b8beff60174ec0bdb28955ebdabb50 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/49b8beff60174ec0bdb28955ebdabb50 2024-11-23T13:21:29,218 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/5766feb26dd74f8081520e2ebbb750b8 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/5766feb26dd74f8081520e2ebbb750b8 2024-11-23T13:21:29,220 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/dae7886733c54fbc839e506bfa54b797 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/dae7886733c54fbc839e506bfa54b797 2024-11-23T13:21:29,221 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/fdaf06915f854f41bfc28387eebe4575 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/fdaf06915f854f41bfc28387eebe4575 2024-11-23T13:21:29,222 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/91bcedab50d9418aa521d1e4cf8b79cc to 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/91bcedab50d9418aa521d1e4cf8b79cc 2024-11-23T13:21:29,223 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/0a947b38cb884dec8fc973004f5bf624 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/0a947b38cb884dec8fc973004f5bf624 2024-11-23T13:21:29,224 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/e8de093a701e4a4b83dd0397f1302d8a to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/e8de093a701e4a4b83dd0397f1302d8a 2024-11-23T13:21:29,226 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/fb994febe70444078dc91bd26647cb52 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/fb994febe70444078dc91bd26647cb52 2024-11-23T13:21:29,227 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/fa92163a33a3429d82c1d610a9a39588 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/fa92163a33a3429d82c1d610a9a39588 2024-11-23T13:21:29,228 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/a298c9d1130c4112976ef3cfd792a52d to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/a298c9d1130c4112976ef3cfd792a52d 2024-11-23T13:21:29,229 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/195ebe2045d04600bd8d2ad7fb86776e to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/195ebe2045d04600bd8d2ad7fb86776e 2024-11-23T13:21:29,231 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/0cb4da06a623471fb3be936bcc6d35d3 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/0cb4da06a623471fb3be936bcc6d35d3 2024-11-23T13:21:29,232 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/706bfba7433e4161b86d69f575e58016 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/706bfba7433e4161b86d69f575e58016 2024-11-23T13:21:29,233 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/867fcee249c74181912408d650e9ac58 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/867fcee249c74181912408d650e9ac58 2024-11-23T13:21:29,234 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/0336d22c97144cdeb572b62142285625 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/0336d22c97144cdeb572b62142285625 2024-11-23T13:21:29,235 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/1653cbfd2e4543b6bfbc23ece82e0847 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/1653cbfd2e4543b6bfbc23ece82e0847 2024-11-23T13:21:29,236 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/22d550f675ad49899dcd196d09b8048e to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/22d550f675ad49899dcd196d09b8048e 2024-11-23T13:21:29,237 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/92b2c48f4f1e4c9cae5eadbb1d296209 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/92b2c48f4f1e4c9cae5eadbb1d296209 2024-11-23T13:21:29,239 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/93fb3ccbf502425a970bba4895e220c9 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/93fb3ccbf502425a970bba4895e220c9 2024-11-23T13:21:29,240 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/3451f5e8f08a454ab5e7a93bf9aa77f3 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/3451f5e8f08a454ab5e7a93bf9aa77f3 2024-11-23T13:21:29,241 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/96268a4b6e654bca9cf778b462b94fe4 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/96268a4b6e654bca9cf778b462b94fe4 2024-11-23T13:21:29,242 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/4fb9a51993f44b6084ccdf182faa25cb to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/4fb9a51993f44b6084ccdf182faa25cb 2024-11-23T13:21:29,244 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/0046e7f3d090429ba6fecc266786cb98 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/0046e7f3d090429ba6fecc266786cb98 2024-11-23T13:21:29,245 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/e53dcc542b894b9495f8f9f951170274 to 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/e53dcc542b894b9495f8f9f951170274 2024-11-23T13:21:29,246 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/c538c76c9e1540f1996c23e7373f2ec2 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/c538c76c9e1540f1996c23e7373f2ec2 2024-11-23T13:21:29,247 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/06283acd2e4e4bae81408ece19e5fc69 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/06283acd2e4e4bae81408ece19e5fc69 2024-11-23T13:21:29,248 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/6ed7a4aa0bac4fce9ff12dbfaeab8785 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/6ed7a4aa0bac4fce9ff12dbfaeab8785 2024-11-23T13:21:29,249 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/8ec3513390be49b8bb2a35b68d2ebf73 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/8ec3513390be49b8bb2a35b68d2ebf73 2024-11-23T13:21:29,251 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/a9d2b6801e524e29a57c571aa15a90b2 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/a9d2b6801e524e29a57c571aa15a90b2 2024-11-23T13:21:29,252 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/cd7970f38a40452d96f952168a8e74d1 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/cd7970f38a40452d96f952168a8e74d1 2024-11-23T13:21:29,253 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/d062926c6ce84e13b3636fc7ebfa7c36 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/d062926c6ce84e13b3636fc7ebfa7c36 2024-11-23T13:21:29,254 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/c8c30762a2ed4eef9d6c639d87d0a96f to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/c8c30762a2ed4eef9d6c639d87d0a96f 2024-11-23T13:21:29,255 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/5dd1529fb1be4b4093d3f0bf90c615e3 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/5dd1529fb1be4b4093d3f0bf90c615e3 2024-11-23T13:21:29,257 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/0505fa4641fa4582b0f0c5335703d228 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/0505fa4641fa4582b0f0c5335703d228 2024-11-23T13:21:29,258 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/1f99e1796f5143e2ae693c9d25cc7331 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/1f99e1796f5143e2ae693c9d25cc7331 2024-11-23T13:21:29,259 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/b73d841a62474f7a9dc1763fdc973f8b to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/b73d841a62474f7a9dc1763fdc973f8b 2024-11-23T13:21:29,260 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/90da84f9f8bf4d9db34a71b6c5f32347 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/90da84f9f8bf4d9db34a71b6c5f32347 2024-11-23T13:21:29,274 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/522d936688ed4a6cb83fffc27b67cd19, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/efd9da8ea1ab4d62ac34b163a41cd624, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/8128cc2545e44515abd785d04b76ecd7, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/a1ee73a0ae82404e9c09576fc4469959, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/8d958cd0999c403093a120161e3af535, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/0d1ae463c63048efac0b9733e32d65f0, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/ec792505ad954d329bc1854c9b391c9b, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/75e68361ab684e0a8e33e55e5512653c, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/35f322c4fd6d4733bf38e53bdca328c1, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/9994fd6278a84a19ae512cce84d4b199, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/7ebc1483365a46ffb7c9b846b269095a, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/be046e5f5f754c3bb500e04532cf6cf0, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/e82fcb68879c436eb1f53d72f88863dd, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/bf5593c0ae3f4d1aaf5b5b7b88d4869c, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/a5571bd2c3da4c1eae009727373e08c8, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/9260a2735a8443abb84c541bbca544b3, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/5c8041f59af94feeb22d5f5123a7fdf4, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/9c20c904a3504caa8aa7360157d57370, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/3d68828cffb24e6f8f37958efa03bcfd, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/8b217a7f0fac4853bd65a25c396fec4c, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/66e974281b0342bb875519fee24a85f3, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/f990998c1c7a4d538a106f0f7df56e67, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/1bc9e437222f454ab8984f18bce7f4ad, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/8034e0d797e44ebb891e299c623acab8, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/e6d7b56e3baa4730b452410f4d160b26, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/40aa7ff75a8e4208b7ac62ddf1655c9b, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/cf70377c0ef845008f0b730493227f62, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/b7130e5e5a994202ae675c7246b0e760, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/8059ac14dd6045ddb9876b3bc0e43347, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/aa003a7ddb4e46aeaa55bfdd12679577, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/50a500468a6f42fe9cff14bd6ac913bc, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/0a72a36c3e424fb49a7b3c212654fe8c, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/5d8f3e3ac68744369a0b99aa87f63892, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/b969eec4eb4b4475bceb74052af54764, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/6e89716bda2649bd8fd0c5d2b39d9ac9, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/669ba17311624c4084772f709e21a41e, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/ac31712ee5f249a9bbdf1c881cc22a49, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/5840ccf2cfb54fa6b6d34348f6f95192, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/361c6dce4f1f41e794bf7bf36435d340, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/43a1eb8560244279933e737fa12bbcb8, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/3e330483d8be4a6195afabf454734097, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/b04bf9d667404e20962f1c91eecb87f4, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/a29bfeae3fa741c3838cd361a32c41d6, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/e9f8921919c2462fa1b36315bc4d4550, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/c782e57f87c94362bf36e9f46e3bdf3b] to archive 2024-11-23T13:21:29,276 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-23T13:21:29,278 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/522d936688ed4a6cb83fffc27b67cd19 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/522d936688ed4a6cb83fffc27b67cd19 2024-11-23T13:21:29,279 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/efd9da8ea1ab4d62ac34b163a41cd624 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/efd9da8ea1ab4d62ac34b163a41cd624 2024-11-23T13:21:29,280 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/8128cc2545e44515abd785d04b76ecd7 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/8128cc2545e44515abd785d04b76ecd7 2024-11-23T13:21:29,281 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/a1ee73a0ae82404e9c09576fc4469959 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/a1ee73a0ae82404e9c09576fc4469959 2024-11-23T13:21:29,283 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/8d958cd0999c403093a120161e3af535 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/8d958cd0999c403093a120161e3af535 2024-11-23T13:21:29,284 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/0d1ae463c63048efac0b9733e32d65f0 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/0d1ae463c63048efac0b9733e32d65f0 2024-11-23T13:21:29,285 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/ec792505ad954d329bc1854c9b391c9b to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/ec792505ad954d329bc1854c9b391c9b 2024-11-23T13:21:29,286 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/75e68361ab684e0a8e33e55e5512653c to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/75e68361ab684e0a8e33e55e5512653c 2024-11-23T13:21:29,287 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/35f322c4fd6d4733bf38e53bdca328c1 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/35f322c4fd6d4733bf38e53bdca328c1 2024-11-23T13:21:29,288 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/9994fd6278a84a19ae512cce84d4b199 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/9994fd6278a84a19ae512cce84d4b199 2024-11-23T13:21:29,290 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/7ebc1483365a46ffb7c9b846b269095a to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/7ebc1483365a46ffb7c9b846b269095a 2024-11-23T13:21:29,291 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/be046e5f5f754c3bb500e04532cf6cf0 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/be046e5f5f754c3bb500e04532cf6cf0 2024-11-23T13:21:29,292 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/e82fcb68879c436eb1f53d72f88863dd to 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/e82fcb68879c436eb1f53d72f88863dd 2024-11-23T13:21:29,293 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/bf5593c0ae3f4d1aaf5b5b7b88d4869c to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/bf5593c0ae3f4d1aaf5b5b7b88d4869c 2024-11-23T13:21:29,294 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/a5571bd2c3da4c1eae009727373e08c8 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/a5571bd2c3da4c1eae009727373e08c8 2024-11-23T13:21:29,295 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/9260a2735a8443abb84c541bbca544b3 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/9260a2735a8443abb84c541bbca544b3 2024-11-23T13:21:29,296 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/5c8041f59af94feeb22d5f5123a7fdf4 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/5c8041f59af94feeb22d5f5123a7fdf4 2024-11-23T13:21:29,298 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/9c20c904a3504caa8aa7360157d57370 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/9c20c904a3504caa8aa7360157d57370 2024-11-23T13:21:29,299 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/3d68828cffb24e6f8f37958efa03bcfd to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/3d68828cffb24e6f8f37958efa03bcfd 2024-11-23T13:21:29,300 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/8b217a7f0fac4853bd65a25c396fec4c to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/8b217a7f0fac4853bd65a25c396fec4c 2024-11-23T13:21:29,301 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/66e974281b0342bb875519fee24a85f3 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/66e974281b0342bb875519fee24a85f3 2024-11-23T13:21:29,302 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/f990998c1c7a4d538a106f0f7df56e67 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/f990998c1c7a4d538a106f0f7df56e67 2024-11-23T13:21:29,304 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/1bc9e437222f454ab8984f18bce7f4ad to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/1bc9e437222f454ab8984f18bce7f4ad 2024-11-23T13:21:29,305 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/8034e0d797e44ebb891e299c623acab8 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/8034e0d797e44ebb891e299c623acab8 2024-11-23T13:21:29,306 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/e6d7b56e3baa4730b452410f4d160b26 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/e6d7b56e3baa4730b452410f4d160b26 2024-11-23T13:21:29,307 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/40aa7ff75a8e4208b7ac62ddf1655c9b to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/40aa7ff75a8e4208b7ac62ddf1655c9b 2024-11-23T13:21:29,308 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/cf70377c0ef845008f0b730493227f62 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/cf70377c0ef845008f0b730493227f62 2024-11-23T13:21:29,309 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/b7130e5e5a994202ae675c7246b0e760 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/b7130e5e5a994202ae675c7246b0e760 2024-11-23T13:21:29,310 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/8059ac14dd6045ddb9876b3bc0e43347 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/8059ac14dd6045ddb9876b3bc0e43347 2024-11-23T13:21:29,312 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/aa003a7ddb4e46aeaa55bfdd12679577 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/aa003a7ddb4e46aeaa55bfdd12679577 2024-11-23T13:21:29,313 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/50a500468a6f42fe9cff14bd6ac913bc to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/50a500468a6f42fe9cff14bd6ac913bc 2024-11-23T13:21:29,314 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/0a72a36c3e424fb49a7b3c212654fe8c to 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/0a72a36c3e424fb49a7b3c212654fe8c 2024-11-23T13:21:29,315 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/5d8f3e3ac68744369a0b99aa87f63892 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/5d8f3e3ac68744369a0b99aa87f63892 2024-11-23T13:21:29,316 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/b969eec4eb4b4475bceb74052af54764 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/b969eec4eb4b4475bceb74052af54764 2024-11-23T13:21:29,317 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/6e89716bda2649bd8fd0c5d2b39d9ac9 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/6e89716bda2649bd8fd0c5d2b39d9ac9 2024-11-23T13:21:29,318 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/669ba17311624c4084772f709e21a41e to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/669ba17311624c4084772f709e21a41e 2024-11-23T13:21:29,319 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/ac31712ee5f249a9bbdf1c881cc22a49 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/ac31712ee5f249a9bbdf1c881cc22a49 2024-11-23T13:21:29,320 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/5840ccf2cfb54fa6b6d34348f6f95192 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/5840ccf2cfb54fa6b6d34348f6f95192 2024-11-23T13:21:29,322 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/361c6dce4f1f41e794bf7bf36435d340 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/361c6dce4f1f41e794bf7bf36435d340 2024-11-23T13:21:29,323 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/43a1eb8560244279933e737fa12bbcb8 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/43a1eb8560244279933e737fa12bbcb8 2024-11-23T13:21:29,324 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/3e330483d8be4a6195afabf454734097 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/3e330483d8be4a6195afabf454734097 2024-11-23T13:21:29,325 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/b04bf9d667404e20962f1c91eecb87f4 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/b04bf9d667404e20962f1c91eecb87f4 2024-11-23T13:21:29,326 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/a29bfeae3fa741c3838cd361a32c41d6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/a29bfeae3fa741c3838cd361a32c41d6 2024-11-23T13:21:29,327 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/e9f8921919c2462fa1b36315bc4d4550 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/e9f8921919c2462fa1b36315bc4d4550 2024-11-23T13:21:29,328 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/c782e57f87c94362bf36e9f46e3bdf3b to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/c782e57f87c94362bf36e9f46e3bdf3b 2024-11-23T13:21:29,329 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/43615e46523f46bcb56397c7f2f2a0ce, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/f2a551663d8e47e9addf9db8900859d8, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/2066f564e2844f31be87412a81ffcae9, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/2137edd728e441929612224219781ee7, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/9650542811e5424a98acf163e98bb3d4, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/5d8e936c13cd42f096dc805317619629, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/4302726451d3429fa7a60fcef7f12aad, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/fac0eee2b23d4742a59416b4307f8041, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/bf39e173d63b43f9a8d320c01c1c2203, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/10effabfd0f04d698fd9af813eabf4da, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/9daeeeec0b3b4cc3bb35cf03cb693a43, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/d30458de97fc4e228b7204b6470b18a0, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/0899b126280c43de96ed6f0818bd7535, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/27d06dc86d2e4de7b98424b1f124d61e, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/68fb036f65b54fb88f69771a561b045d, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/2807abe5f9754699a49e47fa95672b3c, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/2c489f274926488cb0c6767106ffbe9d, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/59ac3f64665a4baeae9c550191a164e5, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/7487fe022d52497789c826a798c87ccf, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/0363a37d34d2411b8f4d8a9905f21b2c, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/3b9f4095c481432d995ea9998b18a7ba, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/e527817d9f55437aa8595b72860d810c, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/d59a21c3de2e44089a72ffb745ac554c, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/c9581b0eeb934015b67cd7177f21aae2, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/aa3bd63b9ede4aa089f75dcaec4d431e, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/6350d9e6ad0a4b6fbb094983aa6d3425, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/3a4b327b386c43ba843cbf5008d5909b, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/2a11b0853aa84b8d9bb8829836444924, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/49443438957a4e2fb01fecd95fa9942c, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/f1f5ed0187064a67ad7bfc99a74cd996, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/3895c6dc78204a8285cf1e561ea237e1, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/cd9b2dbef82c4df897fd39f7a0b1cdd8, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/ec674a71cebc4c57aa5a5bc971df3ba8, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/225fa415b96f4029a3ec7e3fce7713e5, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/49f040bc467d413c9360fe3411983357, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/d453ccf972e14a74a41a0977e7f87706, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/40c8bfcf79274dc0bd9924bfdf7b47f3, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/2535f7e990dc4f2789b792fb8fad01f4, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/0a4b4f015c6741e980bd41b081e626f2, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/d092619b903940c3a76da02ba91bf354, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/bc9b20e94c9c4b458a8ebe30dbcb54af, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/1a57cd1b4c81487cb72702a0ab157f26, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/df9825a4f5ce4b68bf5688676524c0c2, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/e709cdb5f3d3470e95f31fd2de36811c, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/314d6f0a7e354d05805aec6284ef3eeb] to archive 2024-11-23T13:21:29,331 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-23T13:21:29,332 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/43615e46523f46bcb56397c7f2f2a0ce to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/43615e46523f46bcb56397c7f2f2a0ce 2024-11-23T13:21:29,333 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/f2a551663d8e47e9addf9db8900859d8 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/f2a551663d8e47e9addf9db8900859d8 2024-11-23T13:21:29,334 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/2066f564e2844f31be87412a81ffcae9 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/2066f564e2844f31be87412a81ffcae9 2024-11-23T13:21:29,335 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/2137edd728e441929612224219781ee7 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/2137edd728e441929612224219781ee7 2024-11-23T13:21:29,336 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/9650542811e5424a98acf163e98bb3d4 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/9650542811e5424a98acf163e98bb3d4 2024-11-23T13:21:29,337 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/5d8e936c13cd42f096dc805317619629 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/5d8e936c13cd42f096dc805317619629 2024-11-23T13:21:29,338 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/4302726451d3429fa7a60fcef7f12aad to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/4302726451d3429fa7a60fcef7f12aad 2024-11-23T13:21:29,340 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/fac0eee2b23d4742a59416b4307f8041 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/fac0eee2b23d4742a59416b4307f8041 2024-11-23T13:21:29,341 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/bf39e173d63b43f9a8d320c01c1c2203 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/bf39e173d63b43f9a8d320c01c1c2203 2024-11-23T13:21:29,342 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/10effabfd0f04d698fd9af813eabf4da to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/10effabfd0f04d698fd9af813eabf4da 2024-11-23T13:21:29,343 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/9daeeeec0b3b4cc3bb35cf03cb693a43 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/9daeeeec0b3b4cc3bb35cf03cb693a43 2024-11-23T13:21:29,344 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/d30458de97fc4e228b7204b6470b18a0 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/d30458de97fc4e228b7204b6470b18a0 2024-11-23T13:21:29,345 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/0899b126280c43de96ed6f0818bd7535 to 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/0899b126280c43de96ed6f0818bd7535 2024-11-23T13:21:29,346 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/27d06dc86d2e4de7b98424b1f124d61e to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/27d06dc86d2e4de7b98424b1f124d61e 2024-11-23T13:21:29,348 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/68fb036f65b54fb88f69771a561b045d to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/68fb036f65b54fb88f69771a561b045d 2024-11-23T13:21:29,349 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/2807abe5f9754699a49e47fa95672b3c to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/2807abe5f9754699a49e47fa95672b3c 2024-11-23T13:21:29,350 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/2c489f274926488cb0c6767106ffbe9d to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/2c489f274926488cb0c6767106ffbe9d 2024-11-23T13:21:29,351 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/59ac3f64665a4baeae9c550191a164e5 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/59ac3f64665a4baeae9c550191a164e5 2024-11-23T13:21:29,352 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/7487fe022d52497789c826a798c87ccf to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/7487fe022d52497789c826a798c87ccf 2024-11-23T13:21:29,354 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/0363a37d34d2411b8f4d8a9905f21b2c to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/0363a37d34d2411b8f4d8a9905f21b2c 2024-11-23T13:21:29,355 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/3b9f4095c481432d995ea9998b18a7ba to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/3b9f4095c481432d995ea9998b18a7ba 2024-11-23T13:21:29,356 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/e527817d9f55437aa8595b72860d810c to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/e527817d9f55437aa8595b72860d810c 2024-11-23T13:21:29,357 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/d59a21c3de2e44089a72ffb745ac554c to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/d59a21c3de2e44089a72ffb745ac554c 2024-11-23T13:21:29,358 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/c9581b0eeb934015b67cd7177f21aae2 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/c9581b0eeb934015b67cd7177f21aae2 2024-11-23T13:21:29,360 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/aa3bd63b9ede4aa089f75dcaec4d431e to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/aa3bd63b9ede4aa089f75dcaec4d431e 2024-11-23T13:21:29,361 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/6350d9e6ad0a4b6fbb094983aa6d3425 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/6350d9e6ad0a4b6fbb094983aa6d3425 2024-11-23T13:21:29,362 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/3a4b327b386c43ba843cbf5008d5909b to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/3a4b327b386c43ba843cbf5008d5909b 2024-11-23T13:21:29,363 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/2a11b0853aa84b8d9bb8829836444924 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/2a11b0853aa84b8d9bb8829836444924 2024-11-23T13:21:29,364 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/49443438957a4e2fb01fecd95fa9942c to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/49443438957a4e2fb01fecd95fa9942c 2024-11-23T13:21:29,365 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/f1f5ed0187064a67ad7bfc99a74cd996 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/f1f5ed0187064a67ad7bfc99a74cd996 2024-11-23T13:21:29,366 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/3895c6dc78204a8285cf1e561ea237e1 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/3895c6dc78204a8285cf1e561ea237e1 2024-11-23T13:21:29,367 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/cd9b2dbef82c4df897fd39f7a0b1cdd8 to 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/cd9b2dbef82c4df897fd39f7a0b1cdd8 2024-11-23T13:21:29,368 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/ec674a71cebc4c57aa5a5bc971df3ba8 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/ec674a71cebc4c57aa5a5bc971df3ba8 2024-11-23T13:21:29,370 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/225fa415b96f4029a3ec7e3fce7713e5 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/225fa415b96f4029a3ec7e3fce7713e5 2024-11-23T13:21:29,371 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/49f040bc467d413c9360fe3411983357 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/49f040bc467d413c9360fe3411983357 2024-11-23T13:21:29,372 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/d453ccf972e14a74a41a0977e7f87706 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/d453ccf972e14a74a41a0977e7f87706 2024-11-23T13:21:29,373 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/40c8bfcf79274dc0bd9924bfdf7b47f3 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/40c8bfcf79274dc0bd9924bfdf7b47f3 2024-11-23T13:21:29,374 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/2535f7e990dc4f2789b792fb8fad01f4 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/2535f7e990dc4f2789b792fb8fad01f4 2024-11-23T13:21:29,375 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/0a4b4f015c6741e980bd41b081e626f2 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/0a4b4f015c6741e980bd41b081e626f2 2024-11-23T13:21:29,376 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/d092619b903940c3a76da02ba91bf354 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/d092619b903940c3a76da02ba91bf354 2024-11-23T13:21:29,377 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/bc9b20e94c9c4b458a8ebe30dbcb54af to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/bc9b20e94c9c4b458a8ebe30dbcb54af 2024-11-23T13:21:29,378 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/1a57cd1b4c81487cb72702a0ab157f26 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/1a57cd1b4c81487cb72702a0ab157f26 2024-11-23T13:21:29,379 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/df9825a4f5ce4b68bf5688676524c0c2 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/df9825a4f5ce4b68bf5688676524c0c2 2024-11-23T13:21:29,380 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/e709cdb5f3d3470e95f31fd2de36811c to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/e709cdb5f3d3470e95f31fd2de36811c 2024-11-23T13:21:29,381 DEBUG [StoreCloser-TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/314d6f0a7e354d05805aec6284ef3eeb to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/314d6f0a7e354d05805aec6284ef3eeb 2024-11-23T13:21:29,386 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/recovered.edits/667.seqid, newMaxSeqId=667, maxSeqId=1 2024-11-23T13:21:29,389 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f. 2024-11-23T13:21:29,389 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1635): Region close journal for 519df349e6147d27e7c8246089c4409f: 2024-11-23T13:21:29,391 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(170): Closed 519df349e6147d27e7c8246089c4409f 2024-11-23T13:21:29,391 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=34 updating hbase:meta row=519df349e6147d27e7c8246089c4409f, regionState=CLOSED 2024-11-23T13:21:29,394 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=35, resume processing ppid=34 2024-11-23T13:21:29,394 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, ppid=34, state=SUCCESS; CloseRegionProcedure 519df349e6147d27e7c8246089c4409f, server=ba2e440802a7,33173,1732368061317 in 1.6060 sec 2024-11-23T13:21:29,395 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=34, resume processing ppid=33 2024-11-23T13:21:29,395 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, ppid=33, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=519df349e6147d27e7c8246089c4409f, UNASSIGN in 1.6100 sec 2024-11-23T13:21:29,397 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=33, resume processing ppid=32 2024-11-23T13:21:29,398 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, ppid=32, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.6180 sec 2024-11-23T13:21:29,399 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732368089398"}]},"ts":"1732368089398"} 2024-11-23T13:21:29,400 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-23T13:21:29,402 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-23T13:21:29,404 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.6350 sec 2024-11-23T13:21:29,719 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-23T13:21:29,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-23T13:21:29,879 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 32 completed 2024-11-23T13:21:29,882 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-23T13:21:29,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=36, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T13:21:29,887 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=36, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T13:21:29,888 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=36, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T13:21:29,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-23T13:21:29,892 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f 2024-11-23T13:21:29,896 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A, FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B, FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C, FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/recovered.edits] 2024-11-23T13:21:29,899 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/16f3a725b1cf44a3b85f5120938d9e8e to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/16f3a725b1cf44a3b85f5120938d9e8e 2024-11-23T13:21:29,900 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/26e7824157bc4f328331f6ad879759c7 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/26e7824157bc4f328331f6ad879759c7 2024-11-23T13:21:29,901 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/c34c4199c13c498c88849e91278243d8 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/A/c34c4199c13c498c88849e91278243d8 2024-11-23T13:21:29,903 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/04c4a2393dbe4adf8694456c95366da3 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/04c4a2393dbe4adf8694456c95366da3 2024-11-23T13:21:29,905 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/24db900acb69447dabd98bca5568271a to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/24db900acb69447dabd98bca5568271a 2024-11-23T13:21:29,906 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/391173f7f90748cea98bb6f7b4e4a2ca to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/B/391173f7f90748cea98bb6f7b4e4a2ca 2024-11-23T13:21:29,908 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/075d1f6210f749da9619a572d78787c0 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/075d1f6210f749da9619a572d78787c0 2024-11-23T13:21:29,910 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/50bc1590dda5480eb522c35cd18394ed to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/50bc1590dda5480eb522c35cd18394ed 2024-11-23T13:21:29,911 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/5e6bc460539c405787d7c3243371c2b0 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/C/5e6bc460539c405787d7c3243371c2b0 2024-11-23T13:21:29,914 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/recovered.edits/667.seqid 
to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f/recovered.edits/667.seqid 2024-11-23T13:21:29,915 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/519df349e6147d27e7c8246089c4409f 2024-11-23T13:21:29,915 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-23T13:21:29,920 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=36, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T13:21:29,924 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-11-23T13:21:29,927 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-23T13:21:29,961 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-23T13:21:29,963 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=36, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T13:21:29,963 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-23T13:21:29,963 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732368089963"}]},"ts":"9223372036854775807"} 2024-11-23T13:21:29,967 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-23T13:21:29,967 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 519df349e6147d27e7c8246089c4409f, NAME => 'TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f.', STARTKEY => '', ENDKEY => ''}] 2024-11-23T13:21:29,967 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
2024-11-23T13:21:29,967 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732368089967"}]},"ts":"9223372036854775807"} 2024-11-23T13:21:29,970 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-23T13:21:29,972 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=36, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T13:21:29,973 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 90 msec 2024-11-23T13:21:29,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-23T13:21:29,990 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 36 completed 2024-11-23T13:21:30,000 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMixedAtomicity Thread=240 (was 219) Potentially hanging thread: hconnection-0x1ab0a33d-shared-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x1ab0a33d-shared-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-953829615_22 at /127.0.0.1:58224 [Waiting for operation #322] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2126048790_22 at /127.0.0.1:40360 [Waiting for operation #335] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x1ab0a33d-shared-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2126048790_22 at /127.0.0.1:58432 [Waiting for operation #245] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x1ab0a33d-shared-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS:0;ba2e440802a7:33173-shortCompactions-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.PriorityBlockingQueue.take(PriorityBlockingQueue.java:535) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=461 (was 444) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=378 (was 197) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=3839 (was 4437) 2024-11-23T13:21:30,009 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMobMixedAtomicity Thread=240, OpenFileDescriptor=461, MaxFileDescriptor=1048576, SystemLoadAverage=378, ProcessCount=11, AvailableMemoryMB=3839 2024-11-23T13:21:30,011 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-23T13:21:30,011 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T13:21:30,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=37, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-23T13:21:30,014 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=37, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T13:21:30,014 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:30,014 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 37 2024-11-23T13:21:30,014 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=37, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T13:21:30,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-11-23T13:21:30,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741983_1159 (size=960) 2024-11-23T13:21:30,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-11-23T13:21:30,317 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-11-23T13:21:30,424 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7 2024-11-23T13:21:30,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741984_1160 (size=53) 2024-11-23T13:21:30,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-11-23T13:21:30,830 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T13:21:30,830 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 329ab862a28643091a2def94193b04dc, disabling compactions & flushes 2024-11-23T13:21:30,830 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:30,830 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:30,830 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. after waiting 0 ms 2024-11-23T13:21:30,830 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:30,830 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 
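The create request logged above (pid=37) builds 'TestAcidGuarantees' with three column families A, B and C, one version each, and table metadata selecting BASIC in-memory compaction; the MEMSTORE_FLUSHSIZE warning suggests the test has lowered hbase.hregion.memstore.flush.size to 131072 bytes, presumably to force frequent flushes. A minimal Java Admin-API sketch that would create an equivalent table is shown below; the class name and standalone main() are illustrative only and this is not the test's own code. ROW bloom filters and no compression are left at their defaults.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestAcidGuaranteesTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();  // assumes hbase-site.xml on the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder table =
              TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                  // Table-level metadata seen in the log: BASIC in-memory compaction.
                  .setValue("hbase.hregion.compacting.memstore.type", "BASIC");
          for (String family : new String[] {"A", "B", "C"}) {
            table.setColumnFamily(
                ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                    .setMaxVersions(1)    // VERSIONS => '1'
                    .setBlocksize(65536)  // BLOCKSIZE => '65536 B (64KB)'
                    .build());
          }
          admin.createTable(table.build());  // submits the CreateTableProcedure (pid=37 above)
        }
      }
    }
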
2024-11-23T13:21:30,830 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 329ab862a28643091a2def94193b04dc: 2024-11-23T13:21:30,831 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=37, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T13:21:30,832 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732368090831"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732368090831"}]},"ts":"1732368090831"} 2024-11-23T13:21:30,833 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-23T13:21:30,834 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=37, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T13:21:30,834 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732368090834"}]},"ts":"1732368090834"} 2024-11-23T13:21:30,835 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-23T13:21:30,839 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=38, ppid=37, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=329ab862a28643091a2def94193b04dc, ASSIGN}] 2024-11-23T13:21:30,840 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=38, ppid=37, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=329ab862a28643091a2def94193b04dc, ASSIGN 2024-11-23T13:21:30,841 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=38, ppid=37, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=329ab862a28643091a2def94193b04dc, ASSIGN; state=OFFLINE, location=ba2e440802a7,33173,1732368061317; forceNewPlan=false, retain=false 2024-11-23T13:21:30,991 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=38 updating hbase:meta row=329ab862a28643091a2def94193b04dc, regionState=OPENING, regionLocation=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:30,993 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE; OpenRegionProcedure 329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317}] 2024-11-23T13:21:31,075 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-23T13:21:31,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-11-23T13:21:31,145 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:31,148 INFO [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] handler.AssignRegionHandler(135): 
Open TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:31,149 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(7285): Opening region: {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} 2024-11-23T13:21:31,149 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 329ab862a28643091a2def94193b04dc 2024-11-23T13:21:31,149 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T13:21:31,149 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(7327): checking encryption for 329ab862a28643091a2def94193b04dc 2024-11-23T13:21:31,149 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(7330): checking classloading for 329ab862a28643091a2def94193b04dc 2024-11-23T13:21:31,151 INFO [StoreOpener-329ab862a28643091a2def94193b04dc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 329ab862a28643091a2def94193b04dc 2024-11-23T13:21:31,152 INFO [StoreOpener-329ab862a28643091a2def94193b04dc-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-23T13:21:31,152 INFO [StoreOpener-329ab862a28643091a2def94193b04dc-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 329ab862a28643091a2def94193b04dc columnFamilyName A 2024-11-23T13:21:31,153 DEBUG [StoreOpener-329ab862a28643091a2def94193b04dc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:31,153 INFO [StoreOpener-329ab862a28643091a2def94193b04dc-1 {}] regionserver.HStore(327): Store=329ab862a28643091a2def94193b04dc/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T13:21:31,153 INFO [StoreOpener-329ab862a28643091a2def94193b04dc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, 
cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 329ab862a28643091a2def94193b04dc 2024-11-23T13:21:31,154 INFO [StoreOpener-329ab862a28643091a2def94193b04dc-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-23T13:21:31,155 INFO [StoreOpener-329ab862a28643091a2def94193b04dc-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 329ab862a28643091a2def94193b04dc columnFamilyName B 2024-11-23T13:21:31,155 DEBUG [StoreOpener-329ab862a28643091a2def94193b04dc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:31,155 INFO [StoreOpener-329ab862a28643091a2def94193b04dc-1 {}] regionserver.HStore(327): Store=329ab862a28643091a2def94193b04dc/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T13:21:31,155 INFO [StoreOpener-329ab862a28643091a2def94193b04dc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 329ab862a28643091a2def94193b04dc 2024-11-23T13:21:31,157 INFO [StoreOpener-329ab862a28643091a2def94193b04dc-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-23T13:21:31,157 INFO [StoreOpener-329ab862a28643091a2def94193b04dc-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 329ab862a28643091a2def94193b04dc columnFamilyName C 2024-11-23T13:21:31,157 DEBUG [StoreOpener-329ab862a28643091a2def94193b04dc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:31,157 INFO [StoreOpener-329ab862a28643091a2def94193b04dc-1 {}] regionserver.HStore(327): 
Store=329ab862a28643091a2def94193b04dc/C, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T13:21:31,157 INFO [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:31,158 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc 2024-11-23T13:21:31,159 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc 2024-11-23T13:21:31,160 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-23T13:21:31,161 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1085): writing seq id for 329ab862a28643091a2def94193b04dc 2024-11-23T13:21:31,163 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T13:21:31,164 INFO [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1102): Opened 329ab862a28643091a2def94193b04dc; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65600476, jitterRate=-0.02247673273086548}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T13:21:31,165 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1001): Region open journal for 329ab862a28643091a2def94193b04dc: 2024-11-23T13:21:31,165 INFO [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc., pid=39, masterSystemTime=1732368091145 2024-11-23T13:21:31,167 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:31,167 INFO [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 
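At this point the CreateTableProcedure has opened the table's single region (329ab862a28643091a2def94193b04dc, openSeqNum=2) on ba2e440802a7,33173. For orientation, a hedged sketch of how a client could confirm the table is online and see where its region landed, using the standard Admin and RegionLocator APIs (class and method names here are illustrative, not part of the test):

    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class ConfirmTableOnline {
      public static void confirm(Connection conn) throws Exception {
        TableName name = TableName.valueOf("TestAcidGuarantees");
        try (Admin admin = conn.getAdmin();
             RegionLocator locator = conn.getRegionLocator(name)) {
          while (!admin.isTableAvailable(name)) {
            Thread.sleep(100);  // poll until region assignment has finished
          }
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            // Expect one region, hosted on the region server seen in the log.
            System.out.println(loc.getRegion().getEncodedName() + " on " + loc.getServerName());
          }
        }
      }
    }
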
2024-11-23T13:21:31,168 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=38 updating hbase:meta row=329ab862a28643091a2def94193b04dc, regionState=OPEN, openSeqNum=2, regionLocation=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:31,171 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=39, resume processing ppid=38 2024-11-23T13:21:31,171 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, ppid=38, state=SUCCESS; OpenRegionProcedure 329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 in 176 msec 2024-11-23T13:21:31,173 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=38, resume processing ppid=37 2024-11-23T13:21:31,173 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, ppid=37, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=329ab862a28643091a2def94193b04dc, ASSIGN in 332 msec 2024-11-23T13:21:31,173 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=37, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T13:21:31,174 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732368091174"}]},"ts":"1732368091174"} 2024-11-23T13:21:31,175 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-23T13:21:31,178 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=37, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T13:21:31,179 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1670 sec 2024-11-23T13:21:32,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-11-23T13:21:32,121 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 37 completed 2024-11-23T13:21:32,122 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7a9b9802 to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@118b007e 2024-11-23T13:21:32,126 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5d29de25, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:21:32,128 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:21:32,130 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44998, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:21:32,132 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-23T13:21:32,133 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33732, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-23T13:21:32,139 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-23T13:21:32,140 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T13:21:32,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=40, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-23T13:21:32,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741985_1161 (size=996) 2024-11-23T13:21:32,560 DEBUG [PEWorker-2 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.960 2024-11-23T13:21:32,560 INFO [PEWorker-2 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.996 2024-11-23T13:21:32,563 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=41, ppid=40, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-23T13:21:32,572 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=329ab862a28643091a2def94193b04dc, REOPEN/MOVE}] 2024-11-23T13:21:32,572 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=329ab862a28643091a2def94193b04dc, REOPEN/MOVE 2024-11-23T13:21:32,573 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=329ab862a28643091a2def94193b04dc, regionState=CLOSING, regionLocation=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:32,574 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T13:21:32,574 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=43, ppid=42, state=RUNNABLE; CloseRegionProcedure 329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317}] 2024-11-23T13:21:32,725 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:32,726 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(124): Close 329ab862a28643091a2def94193b04dc 2024-11-23T13:21:32,726 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-23T13:21:32,726 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1681): Closing 329ab862a28643091a2def94193b04dc, disabling compactions & flushes 2024-11-23T13:21:32,726 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:32,726 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:32,726 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. after waiting 0 ms 2024-11-23T13:21:32,726 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 
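The modify request logged above (pid=40) rewrites the table descriptor so that column family A becomes a MOB family (IS_MOB => 'true', MOB_THRESHOLD => '4'), which is what triggers the REOPEN/MOVE of the region seen here. A minimal Admin-API sketch that would issue an equivalent modification follows; it is illustrative only, since the test drives this through its own utilities, and the class name is hypothetical.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class EnableMobOnFamilyA {
      public static void enableMob(Connection conn) throws Exception {
        TableName name = TableName.valueOf("TestAcidGuarantees");
        try (Admin admin = conn.getAdmin()) {
          TableDescriptor current = admin.getDescriptor(name);
          ColumnFamilyDescriptor a = current.getColumnFamily(Bytes.toBytes("A"));
          ColumnFamilyDescriptor mobA = ColumnFamilyDescriptorBuilder.newBuilder(a)
              .setMobEnabled(true)   // IS_MOB => 'true'
              .setMobThreshold(4L)   // MOB_THRESHOLD => '4': cells above 4 bytes go to MOB files
              .build();
          TableDescriptor modified = TableDescriptorBuilder.newBuilder(current)
              .modifyColumnFamily(mobA)
              .build();
          // Submits a ModifyTableProcedure like pid=40 above, which reopens the table's regions.
          admin.modifyTable(modified);
        }
      }
    }
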
2024-11-23T13:21:32,730 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-23T13:21:32,731 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:32,731 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1635): Region close journal for 329ab862a28643091a2def94193b04dc: 2024-11-23T13:21:32,731 WARN [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegionServer(3786): Not adding moved region record: 329ab862a28643091a2def94193b04dc to self. 2024-11-23T13:21:32,732 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(170): Closed 329ab862a28643091a2def94193b04dc 2024-11-23T13:21:32,733 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=329ab862a28643091a2def94193b04dc, regionState=CLOSED 2024-11-23T13:21:32,735 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=43, resume processing ppid=42 2024-11-23T13:21:32,735 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, ppid=42, state=SUCCESS; CloseRegionProcedure 329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 in 160 msec 2024-11-23T13:21:32,736 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=329ab862a28643091a2def94193b04dc, REOPEN/MOVE; state=CLOSED, location=ba2e440802a7,33173,1732368061317; forceNewPlan=false, retain=true 2024-11-23T13:21:32,886 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=329ab862a28643091a2def94193b04dc, regionState=OPENING, regionLocation=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:32,888 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=44, ppid=42, state=RUNNABLE; OpenRegionProcedure 329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317}] 2024-11-23T13:21:33,040 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:33,043 INFO [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 
2024-11-23T13:21:33,043 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7285): Opening region: {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} 2024-11-23T13:21:33,043 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 329ab862a28643091a2def94193b04dc 2024-11-23T13:21:33,043 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T13:21:33,043 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7327): checking encryption for 329ab862a28643091a2def94193b04dc 2024-11-23T13:21:33,044 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7330): checking classloading for 329ab862a28643091a2def94193b04dc 2024-11-23T13:21:33,048 INFO [StoreOpener-329ab862a28643091a2def94193b04dc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 329ab862a28643091a2def94193b04dc 2024-11-23T13:21:33,049 INFO [StoreOpener-329ab862a28643091a2def94193b04dc-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-23T13:21:33,053 INFO [StoreOpener-329ab862a28643091a2def94193b04dc-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 329ab862a28643091a2def94193b04dc columnFamilyName A 2024-11-23T13:21:33,055 DEBUG [StoreOpener-329ab862a28643091a2def94193b04dc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:33,056 INFO [StoreOpener-329ab862a28643091a2def94193b04dc-1 {}] regionserver.HStore(327): Store=329ab862a28643091a2def94193b04dc/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T13:21:33,056 INFO [StoreOpener-329ab862a28643091a2def94193b04dc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 329ab862a28643091a2def94193b04dc 2024-11-23T13:21:33,057 INFO [StoreOpener-329ab862a28643091a2def94193b04dc-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-23T13:21:33,057 INFO [StoreOpener-329ab862a28643091a2def94193b04dc-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 329ab862a28643091a2def94193b04dc columnFamilyName B 2024-11-23T13:21:33,057 DEBUG [StoreOpener-329ab862a28643091a2def94193b04dc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:33,057 INFO [StoreOpener-329ab862a28643091a2def94193b04dc-1 {}] regionserver.HStore(327): Store=329ab862a28643091a2def94193b04dc/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T13:21:33,057 INFO [StoreOpener-329ab862a28643091a2def94193b04dc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 329ab862a28643091a2def94193b04dc 2024-11-23T13:21:33,058 INFO [StoreOpener-329ab862a28643091a2def94193b04dc-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-23T13:21:33,058 INFO [StoreOpener-329ab862a28643091a2def94193b04dc-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 329ab862a28643091a2def94193b04dc columnFamilyName C 2024-11-23T13:21:33,058 DEBUG [StoreOpener-329ab862a28643091a2def94193b04dc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:33,059 INFO [StoreOpener-329ab862a28643091a2def94193b04dc-1 {}] regionserver.HStore(327): Store=329ab862a28643091a2def94193b04dc/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T13:21:33,059 INFO [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:33,059 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc 2024-11-23T13:21:33,060 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc 2024-11-23T13:21:33,062 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-23T13:21:33,064 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1085): writing seq id for 329ab862a28643091a2def94193b04dc 2024-11-23T13:21:33,064 INFO [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1102): Opened 329ab862a28643091a2def94193b04dc; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68559778, jitterRate=0.02162030339241028}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T13:21:33,065 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1001): Region open journal for 329ab862a28643091a2def94193b04dc: 2024-11-23T13:21:33,066 INFO [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc., pid=44, masterSystemTime=1732368093039 2024-11-23T13:21:33,067 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:33,068 INFO [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 
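With the region reopened under the new descriptor (next sequenceid=5), the MOB setting on family A should be live. A quick, hedged way to confirm this from a client using the standard descriptor accessors (illustrative class name, not the test's code):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CheckMobFamilyA {
      public static void check(Connection conn) throws Exception {
        try (Admin admin = conn.getAdmin()) {
          ColumnFamilyDescriptor a = admin
              .getDescriptor(TableName.valueOf("TestAcidGuarantees"))
              .getColumnFamily(Bytes.toBytes("A"));
          // After the reopen above, family A is expected to report MOB enabled with threshold 4.
          System.out.println("A isMob=" + a.isMobEnabled() + " threshold=" + a.getMobThreshold());
        }
      }
    }
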
2024-11-23T13:21:33,068 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=329ab862a28643091a2def94193b04dc, regionState=OPEN, openSeqNum=5, regionLocation=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:33,071 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=44, resume processing ppid=42 2024-11-23T13:21:33,071 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, ppid=42, state=SUCCESS; OpenRegionProcedure 329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 in 181 msec 2024-11-23T13:21:33,072 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=42, resume processing ppid=41 2024-11-23T13:21:33,073 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, ppid=41, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=329ab862a28643091a2def94193b04dc, REOPEN/MOVE in 499 msec 2024-11-23T13:21:33,075 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=41, resume processing ppid=40 2024-11-23T13:21:33,076 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, ppid=40, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 511 msec 2024-11-23T13:21:33,079 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 935 msec 2024-11-23T13:21:33,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=40 2024-11-23T13:21:33,089 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7cae6c5c to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@79982672 2024-11-23T13:21:33,099 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@433e2b26, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:21:33,100 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5c820ef9 to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7b4bd1ba 2024-11-23T13:21:33,104 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@176c5c1b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:21:33,105 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0b44b1e5 to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@454f1431 2024-11-23T13:21:33,109 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@190853fc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:21:33,110 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x42e904d8 to 
127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@505d5ccd 2024-11-23T13:21:33,113 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c5c4716, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:21:33,114 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0a4c53ed to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@367f47f7 2024-11-23T13:21:33,118 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2885d2d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:21:33,119 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x247c0c93 to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@22e911df 2024-11-23T13:21:33,122 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78cafade, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:21:33,123 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x517ff977 to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3b727d6e 2024-11-23T13:21:33,126 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14c16cd4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:21:33,127 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3448d233 to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1c7940d9 2024-11-23T13:21:33,130 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@341384e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:21:33,131 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7a11164b to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2c38ee58 2024-11-23T13:21:33,134 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@26b120d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:21:33,138 DEBUG 
[hconnection-0x22db0e45-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:21:33,138 DEBUG [hconnection-0x1087dbe5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:21:33,139 DEBUG [hconnection-0x7ddc8ee1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:21:33,139 DEBUG [hconnection-0x3a10a185-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:21:33,140 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37494, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:21:33,140 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37490, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:21:33,140 DEBUG [hconnection-0x4856fb5a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:21:33,140 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37498, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:21:33,140 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T13:21:33,142 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37504, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:21:33,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=45, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees 2024-11-23T13:21:33,143 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=45, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T13:21:33,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-23T13:21:33,143 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=45, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T13:21:33,144 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T13:21:33,146 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37520, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:21:33,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 329ab862a28643091a2def94193b04dc 2024-11-23T13:21:33,154 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 329ab862a28643091a2def94193b04dc 3/3 column families, dataSize=53.67 KB 
heapSize=141.38 KB 2024-11-23T13:21:33,154 DEBUG [hconnection-0x13f2c2e4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:21:33,154 DEBUG [hconnection-0x2c1ebb2e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:21:33,154 DEBUG [hconnection-0x1b8549ac-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:21:33,154 DEBUG [hconnection-0x3b068cd0-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:21:33,155 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=A 2024-11-23T13:21:33,155 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37530, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:21:33,155 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:33,156 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=B 2024-11-23T13:21:33,156 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:33,156 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=C 2024-11-23T13:21:33,156 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:33,156 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37538, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:21:33,157 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37548, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:21:33,160 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37542, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:21:33,202 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:33,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368153192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:33,203 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:33,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37494 deadline: 1732368153194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:33,203 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:33,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368153194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:33,204 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:33,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368153196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:33,204 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:33,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37498 deadline: 1732368153200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:33,217 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123ba937118838c4d938243ae727d712b3e_329ab862a28643091a2def94193b04dc is 50, key is test_row_0/A:col10/1732368093152/Put/seqid=0 2024-11-23T13:21:33,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741986_1162 (size=12154) 2024-11-23T13:21:33,231 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:33,237 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123ba937118838c4d938243ae727d712b3e_329ab862a28643091a2def94193b04dc to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123ba937118838c4d938243ae727d712b3e_329ab862a28643091a2def94193b04dc 2024-11-23T13:21:33,241 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/9c11251b964941bbb4b14ec8c6a16332, store: [table=TestAcidGuarantees family=A region=329ab862a28643091a2def94193b04dc] 2024-11-23T13:21:33,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-23T13:21:33,249 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/9c11251b964941bbb4b14ec8c6a16332 is 175, key is test_row_0/A:col10/1732368093152/Put/seqid=0 2024-11-23T13:21:33,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741987_1163 (size=30955) 2024-11-23T13:21:33,296 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:33,296 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-23T13:21:33,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:33,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. as already flushing 2024-11-23T13:21:33,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:33,297 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:33,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:33,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:33,308 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:33,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368153305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:33,309 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:33,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368153305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:33,310 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:33,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37494 deadline: 1732368153306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:33,317 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:33,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37498 deadline: 1732368153306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:33,317 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:33,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368153306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:33,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-23T13:21:33,450 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:33,450 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-23T13:21:33,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:33,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. as already flushing 2024-11-23T13:21:33,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:33,451 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:21:33,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:33,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:33,512 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:33,512 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:33,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368153511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:33,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368153511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:33,513 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:33,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37494 deadline: 1732368153512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:33,520 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:33,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37498 deadline: 1732368153519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:33,521 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:33,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368153520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:33,604 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:33,605 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-23T13:21:33,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:33,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. as already flushing 2024-11-23T13:21:33,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:33,605 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:21:33,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:33,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:33,667 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=16, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/9c11251b964941bbb4b14ec8c6a16332 2024-11-23T13:21:33,698 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/019accfee80d4b85b9eb4e17c6e2c8c9 is 50, key is test_row_0/B:col10/1732368093152/Put/seqid=0 2024-11-23T13:21:33,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741988_1164 (size=12001) 2024-11-23T13:21:33,711 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/019accfee80d4b85b9eb4e17c6e2c8c9 2024-11-23T13:21:33,746 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/d0d3e71fd37a4feea530420e3490185f is 50, key is test_row_0/C:col10/1732368093152/Put/seqid=0 2024-11-23T13:21:33,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-23T13:21:33,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741989_1165 (size=12001) 2024-11-23T13:21:33,755 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/d0d3e71fd37a4feea530420e3490185f 2024-11-23T13:21:33,758 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:33,759 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-23T13:21:33,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:33,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 
as already flushing 2024-11-23T13:21:33,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:33,759 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:33,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:21:33,761 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/9c11251b964941bbb4b14ec8c6a16332 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/9c11251b964941bbb4b14ec8c6a16332 2024-11-23T13:21:33,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:21:33,766 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/9c11251b964941bbb4b14ec8c6a16332, entries=150, sequenceid=16, filesize=30.2 K 2024-11-23T13:21:33,768 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/019accfee80d4b85b9eb4e17c6e2c8c9 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/019accfee80d4b85b9eb4e17c6e2c8c9 2024-11-23T13:21:33,777 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/019accfee80d4b85b9eb4e17c6e2c8c9, entries=150, sequenceid=16, filesize=11.7 K 2024-11-23T13:21:33,780 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/d0d3e71fd37a4feea530420e3490185f as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/d0d3e71fd37a4feea530420e3490185f 2024-11-23T13:21:33,788 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/d0d3e71fd37a4feea530420e3490185f, entries=150, sequenceid=16, filesize=11.7 K 2024-11-23T13:21:33,789 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 329ab862a28643091a2def94193b04dc in 635ms, sequenceid=16, compaction requested=false 2024-11-23T13:21:33,789 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 329ab862a28643091a2def94193b04dc: 2024-11-23T13:21:33,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 329ab862a28643091a2def94193b04dc 2024-11-23T13:21:33,824 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 329ab862a28643091a2def94193b04dc 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-23T13:21:33,827 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=A 2024-11-23T13:21:33,827 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:33,827 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=B 2024-11-23T13:21:33,827 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:33,827 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=C 2024-11-23T13:21:33,827 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline 
suffix; before=1, new segment=null 2024-11-23T13:21:33,843 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411233c0188774fd8468198ac6c4c162173d2_329ab862a28643091a2def94193b04dc is 50, key is test_row_0/A:col10/1732368093821/Put/seqid=0 2024-11-23T13:21:33,848 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:33,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37498 deadline: 1732368153836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:33,848 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:33,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37494 deadline: 1732368153840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:33,849 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:33,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368153840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:33,849 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:33,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368153840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:33,850 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:33,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368153842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:33,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741990_1166 (size=17034) 2024-11-23T13:21:33,859 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:33,865 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411233c0188774fd8468198ac6c4c162173d2_329ab862a28643091a2def94193b04dc to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411233c0188774fd8468198ac6c4c162173d2_329ab862a28643091a2def94193b04dc 2024-11-23T13:21:33,867 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/f1bc792fd82a45caa3276205ef6ea27a, store: [table=TestAcidGuarantees family=A region=329ab862a28643091a2def94193b04dc] 2024-11-23T13:21:33,868 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/f1bc792fd82a45caa3276205ef6ea27a is 175, key is test_row_0/A:col10/1732368093821/Put/seqid=0 2024-11-23T13:21:33,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741991_1167 (size=48139) 2024-11-23T13:21:33,883 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=42, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/f1bc792fd82a45caa3276205ef6ea27a 2024-11-23T13:21:33,902 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/829b398771ac4abfb3e4d1c785aaac4f is 50, key is 
test_row_0/B:col10/1732368093821/Put/seqid=0 2024-11-23T13:21:33,912 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:33,913 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-23T13:21:33,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:33,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. as already flushing 2024-11-23T13:21:33,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:33,913 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:33,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:21:33,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:33,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741992_1168 (size=12001) 2024-11-23T13:21:33,951 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:33,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37498 deadline: 1732368153950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:33,952 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:33,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368153952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:33,951 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:33,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37494 deadline: 1732368153950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:33,953 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:33,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368153952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:33,953 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:33,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368153952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:34,065 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:34,065 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-23T13:21:34,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:34,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. as already flushing 2024-11-23T13:21:34,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:34,066 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:34,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:34,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:34,153 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:34,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37498 deadline: 1732368154153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:34,156 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:34,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368154155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:34,156 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:34,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368154155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:34,157 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:34,157 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:34,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368154155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:34,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37494 deadline: 1732368154155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:34,229 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:34,230 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-23T13:21:34,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:34,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. as already flushing 2024-11-23T13:21:34,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:34,230 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:34,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:34,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:34,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-23T13:21:34,328 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/829b398771ac4abfb3e4d1c785aaac4f 2024-11-23T13:21:34,340 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/dc92b4f700d3418aaccd5117fc865d6e is 50, key is test_row_0/C:col10/1732368093821/Put/seqid=0 2024-11-23T13:21:34,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741993_1169 (size=12001) 2024-11-23T13:21:34,351 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/dc92b4f700d3418aaccd5117fc865d6e 2024-11-23T13:21:34,359 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/f1bc792fd82a45caa3276205ef6ea27a as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/f1bc792fd82a45caa3276205ef6ea27a 2024-11-23T13:21:34,365 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/f1bc792fd82a45caa3276205ef6ea27a, entries=250, sequenceid=42, filesize=47.0 K 2024-11-23T13:21:34,368 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/829b398771ac4abfb3e4d1c785aaac4f as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/829b398771ac4abfb3e4d1c785aaac4f 2024-11-23T13:21:34,373 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/829b398771ac4abfb3e4d1c785aaac4f, entries=150, sequenceid=42, filesize=11.7 K 2024-11-23T13:21:34,374 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/dc92b4f700d3418aaccd5117fc865d6e as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/dc92b4f700d3418aaccd5117fc865d6e 2024-11-23T13:21:34,383 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:34,384 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-23T13:21:34,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:34,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. as already flushing 2024-11-23T13:21:34,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:34,384 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:34,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:34,385 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/dc92b4f700d3418aaccd5117fc865d6e, entries=150, sequenceid=42, filesize=11.7 K 2024-11-23T13:21:34,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:34,386 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 329ab862a28643091a2def94193b04dc in 562ms, sequenceid=42, compaction requested=false 2024-11-23T13:21:34,386 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 329ab862a28643091a2def94193b04dc: 2024-11-23T13:21:34,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 329ab862a28643091a2def94193b04dc 2024-11-23T13:21:34,461 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 329ab862a28643091a2def94193b04dc 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-23T13:21:34,463 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=A 2024-11-23T13:21:34,463 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:34,463 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=B 2024-11-23T13:21:34,463 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:34,463 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=C 2024-11-23T13:21:34,463 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:34,475 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112363287c6123d34edbb43a6c5820c609c4_329ab862a28643091a2def94193b04dc is 50, key is test_row_0/A:col10/1732368093835/Put/seqid=0 2024-11-23T13:21:34,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741994_1170 (size=14594) 2024-11-23T13:21:34,486 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:34,496 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112363287c6123d34edbb43a6c5820c609c4_329ab862a28643091a2def94193b04dc to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112363287c6123d34edbb43a6c5820c609c4_329ab862a28643091a2def94193b04dc 2024-11-23T13:21:34,498 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/8903b8263a934fba936b19de9c4d569e, store: [table=TestAcidGuarantees family=A region=329ab862a28643091a2def94193b04dc] 2024-11-23T13:21:34,498 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/8903b8263a934fba936b19de9c4d569e is 175, key is test_row_0/A:col10/1732368093835/Put/seqid=0 2024-11-23T13:21:34,502 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:34,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37494 deadline: 1732368154493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:34,503 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:34,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368154493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:34,503 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:34,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368154495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:34,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741995_1171 (size=39549) 2024-11-23T13:21:34,510 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:34,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368154500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:34,514 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:34,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37498 deadline: 1732368154503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:34,538 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:34,538 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-23T13:21:34,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 
2024-11-23T13:21:34,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. as already flushing 2024-11-23T13:21:34,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:34,539 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:34,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:21:34,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:34,606 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:34,607 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:34,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37494 deadline: 1732368154604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:34,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368154605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:34,607 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:34,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368154605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:34,613 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:34,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368154613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:34,620 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:34,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37498 deadline: 1732368154618, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:34,691 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:34,692 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-23T13:21:34,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:34,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. as already flushing 2024-11-23T13:21:34,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:34,692 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:34,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:34,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:34,808 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:34,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37494 deadline: 1732368154808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:34,809 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:34,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368154809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:34,812 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:34,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368154810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:34,815 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:34,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368154815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:34,823 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:34,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37498 deadline: 1732368154822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:34,844 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:34,845 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-23T13:21:34,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:34,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. as already flushing 2024-11-23T13:21:34,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:34,845 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:34,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:34,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:34,914 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=56, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/8903b8263a934fba936b19de9c4d569e 2024-11-23T13:21:34,924 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/2ff7381550ce4f6d91be6e7fc194954c is 50, key is test_row_0/B:col10/1732368093835/Put/seqid=0 2024-11-23T13:21:34,933 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-23T13:21:34,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741996_1172 (size=12001) 2024-11-23T13:21:34,955 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/2ff7381550ce4f6d91be6e7fc194954c 2024-11-23T13:21:34,968 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/4d9790bf543a4edf858c16f4da7246da is 50, key is test_row_0/C:col10/1732368093835/Put/seqid=0 2024-11-23T13:21:34,998 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:34,998 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741997_1173 (size=12001) 2024-11-23T13:21:34,999 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-23T13:21:34,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:34,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. as already flushing 2024-11-23T13:21:34,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:34,999 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:34,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:21:35,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:35,111 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:35,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368155111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:35,112 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:35,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37494 deadline: 1732368155112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:35,116 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:35,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368155115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:35,119 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:35,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368155118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:35,126 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:35,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37498 deadline: 1732368155126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:35,151 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:35,152 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-23T13:21:35,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:35,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. as already flushing 2024-11-23T13:21:35,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:35,152 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:35,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:35,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:35,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-23T13:21:35,306 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:35,306 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-23T13:21:35,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:35,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. as already flushing 2024-11-23T13:21:35,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:35,307 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:35,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:35,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:35,383 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-23T13:21:35,384 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43648, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-23T13:21:35,399 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/4d9790bf543a4edf858c16f4da7246da 2024-11-23T13:21:35,404 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/8903b8263a934fba936b19de9c4d569e as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/8903b8263a934fba936b19de9c4d569e 2024-11-23T13:21:35,408 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/8903b8263a934fba936b19de9c4d569e, entries=200, sequenceid=56, filesize=38.6 K 2024-11-23T13:21:35,410 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/2ff7381550ce4f6d91be6e7fc194954c as 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/2ff7381550ce4f6d91be6e7fc194954c 2024-11-23T13:21:35,415 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/2ff7381550ce4f6d91be6e7fc194954c, entries=150, sequenceid=56, filesize=11.7 K 2024-11-23T13:21:35,424 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/4d9790bf543a4edf858c16f4da7246da as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/4d9790bf543a4edf858c16f4da7246da 2024-11-23T13:21:35,430 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/4d9790bf543a4edf858c16f4da7246da, entries=150, sequenceid=56, filesize=11.7 K 2024-11-23T13:21:35,431 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 329ab862a28643091a2def94193b04dc in 970ms, sequenceid=56, compaction requested=true 2024-11-23T13:21:35,431 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 329ab862a28643091a2def94193b04dc: 2024-11-23T13:21:35,431 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 329ab862a28643091a2def94193b04dc:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T13:21:35,431 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:35,431 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 329ab862a28643091a2def94193b04dc:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T13:21:35,431 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:21:35,431 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:35,431 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:21:35,431 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 329ab862a28643091a2def94193b04dc:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T13:21:35,431 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:21:35,433 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction 
algorithm has selected 3 files of size 118643 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:21:35,433 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:21:35,433 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): 329ab862a28643091a2def94193b04dc/A is initiating minor compaction (all files) 2024-11-23T13:21:35,433 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 329ab862a28643091a2def94193b04dc/B is initiating minor compaction (all files) 2024-11-23T13:21:35,433 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 329ab862a28643091a2def94193b04dc/A in TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:35,433 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 329ab862a28643091a2def94193b04dc/B in TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:35,433 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/9c11251b964941bbb4b14ec8c6a16332, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/f1bc792fd82a45caa3276205ef6ea27a, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/8903b8263a934fba936b19de9c4d569e] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp, totalSize=115.9 K 2024-11-23T13:21:35,433 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/019accfee80d4b85b9eb4e17c6e2c8c9, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/829b398771ac4abfb3e4d1c785aaac4f, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/2ff7381550ce4f6d91be6e7fc194954c] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp, totalSize=35.2 K 2024-11-23T13:21:35,433 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:35,433 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 
files: [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/9c11251b964941bbb4b14ec8c6a16332, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/f1bc792fd82a45caa3276205ef6ea27a, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/8903b8263a934fba936b19de9c4d569e] 2024-11-23T13:21:35,433 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 019accfee80d4b85b9eb4e17c6e2c8c9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732368093150 2024-11-23T13:21:35,434 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9c11251b964941bbb4b14ec8c6a16332, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732368093150 2024-11-23T13:21:35,434 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 829b398771ac4abfb3e4d1c785aaac4f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1732368093194 2024-11-23T13:21:35,434 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting f1bc792fd82a45caa3276205ef6ea27a, keycount=250, bloomtype=ROW, size=47.0 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1732368093189 2024-11-23T13:21:35,435 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 2ff7381550ce4f6d91be6e7fc194954c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1732368093835 2024-11-23T13:21:35,435 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8903b8263a934fba936b19de9c4d569e, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1732368093835 2024-11-23T13:21:35,451 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 329ab862a28643091a2def94193b04dc#B#compaction#153 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:35,451 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/8a8a6e14036e4d3e823a0be098350be6 is 50, key is test_row_0/B:col10/1732368093835/Put/seqid=0 2024-11-23T13:21:35,454 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=329ab862a28643091a2def94193b04dc] 2024-11-23T13:21:35,458 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411230eea1495481f458c80e16621115bac56_329ab862a28643091a2def94193b04dc store=[table=TestAcidGuarantees family=A region=329ab862a28643091a2def94193b04dc] 2024-11-23T13:21:35,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741998_1174 (size=12104) 2024-11-23T13:21:35,460 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:35,461 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-23T13:21:35,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 
2024-11-23T13:21:35,461 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2837): Flushing 329ab862a28643091a2def94193b04dc 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-23T13:21:35,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=A 2024-11-23T13:21:35,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:35,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=B 2024-11-23T13:21:35,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:35,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=C 2024-11-23T13:21:35,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:35,469 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411230eea1495481f458c80e16621115bac56_329ab862a28643091a2def94193b04dc, store=[table=TestAcidGuarantees family=A region=329ab862a28643091a2def94193b04dc] 2024-11-23T13:21:35,469 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411230eea1495481f458c80e16621115bac56_329ab862a28643091a2def94193b04dc because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=329ab862a28643091a2def94193b04dc] 2024-11-23T13:21:35,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411231d18593a8b2b4a3a8d63ccce328b07bb_329ab862a28643091a2def94193b04dc is 50, key is test_row_0/A:col10/1732368094471/Put/seqid=0 2024-11-23T13:21:35,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741999_1175 (size=4469) 2024-11-23T13:21:35,488 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 329ab862a28643091a2def94193b04dc#A#compaction#154 average throughput is 0.74 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:35,490 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/2841fd4a0924464488a024de3be045c5 is 175, key is test_row_0/A:col10/1732368093835/Put/seqid=0 2024-11-23T13:21:35,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742000_1176 (size=12154) 2024-11-23T13:21:35,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742001_1177 (size=31058) 2024-11-23T13:21:35,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,511 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411231d18593a8b2b4a3a8d63ccce328b07bb_329ab862a28643091a2def94193b04dc to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411231d18593a8b2b4a3a8d63ccce328b07bb_329ab862a28643091a2def94193b04dc 2024-11-23T13:21:35,513 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/2841fd4a0924464488a024de3be045c5 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/2841fd4a0924464488a024de3be045c5 2024-11-23T13:21:35,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/48e8745c33ab4a0e81e0c3f9bdc7b386, store: [table=TestAcidGuarantees family=A region=329ab862a28643091a2def94193b04dc] 2024-11-23T13:21:35,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/48e8745c33ab4a0e81e0c3f9bdc7b386 is 175, key is test_row_0/A:col10/1732368094471/Put/seqid=0 2024-11-23T13:21:35,520 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 329ab862a28643091a2def94193b04dc/A of 329ab862a28643091a2def94193b04dc into 2841fd4a0924464488a024de3be045c5(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T13:21:35,520 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 329ab862a28643091a2def94193b04dc: 2024-11-23T13:21:35,520 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc., storeName=329ab862a28643091a2def94193b04dc/A, priority=13, startTime=1732368095431; duration=0sec 2024-11-23T13:21:35,520 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:21:35,520 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 329ab862a28643091a2def94193b04dc:A 2024-11-23T13:21:35,520 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:21:35,522 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:21:35,522 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): 329ab862a28643091a2def94193b04dc/C is initiating minor compaction (all files) 2024-11-23T13:21:35,522 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 329ab862a28643091a2def94193b04dc/C in TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:35,523 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/d0d3e71fd37a4feea530420e3490185f, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/dc92b4f700d3418aaccd5117fc865d6e, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/4d9790bf543a4edf858c16f4da7246da] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp, totalSize=35.2 K 2024-11-23T13:21:35,523 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting d0d3e71fd37a4feea530420e3490185f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732368093150 2024-11-23T13:21:35,524 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting dc92b4f700d3418aaccd5117fc865d6e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1732368093194 2024-11-23T13:21:35,524 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4d9790bf543a4edf858c16f4da7246da, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1732368093835 2024-11-23T13:21:35,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:44873 is added to blk_1073742002_1178 (size=30955) 2024-11-23T13:21:35,539 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=78, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/48e8745c33ab4a0e81e0c3f9bdc7b386 2024-11-23T13:21:35,541 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 329ab862a28643091a2def94193b04dc#C#compaction#156 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:35,542 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/8fcb50e3fe4e48d2bf3c1c363bb8fa32 is 50, key is test_row_0/C:col10/1732368093835/Put/seqid=0 2024-11-23T13:21:35,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/38136ff66cc8455784e3d63afc0f01a4 is 50, key is test_row_0/B:col10/1732368094471/Put/seqid=0 2024-11-23T13:21:35,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742003_1179 (size=12104) 2024-11-23T13:21:35,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742004_1180 (size=12001) 2024-11-23T13:21:35,584 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/38136ff66cc8455784e3d63afc0f01a4 2024-11-23T13:21:35,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/072aae490f6f4142a8a08943fcfe7843 is 50, key is test_row_0/C:col10/1732368094471/Put/seqid=0 2024-11-23T13:21:35,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742005_1181 (size=12001) 2024-11-23T13:21:35,612 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/072aae490f6f4142a8a08943fcfe7843 2024-11-23T13:21:35,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 
329ab862a28643091a2def94193b04dc 2024-11-23T13:21:35,618 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. as already flushing 2024-11-23T13:21:35,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/48e8745c33ab4a0e81e0c3f9bdc7b386 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/48e8745c33ab4a0e81e0c3f9bdc7b386 2024-11-23T13:21:35,630 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/48e8745c33ab4a0e81e0c3f9bdc7b386, entries=150, sequenceid=78, filesize=30.2 K 2024-11-23T13:21:35,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/38136ff66cc8455784e3d63afc0f01a4 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/38136ff66cc8455784e3d63afc0f01a4 2024-11-23T13:21:35,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,637 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/38136ff66cc8455784e3d63afc0f01a4, entries=150, sequenceid=78, filesize=11.7 K 2024-11-23T13:21:35,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/072aae490f6f4142a8a08943fcfe7843 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/072aae490f6f4142a8a08943fcfe7843 2024-11-23T13:21:35,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,643 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/072aae490f6f4142a8a08943fcfe7843, entries=150, sequenceid=78, filesize=11.7 K 2024-11-23T13:21:35,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T13:21:35,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,647 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=67.09 KB/68700 for 329ab862a28643091a2def94193b04dc in 186ms, sequenceid=78, compaction requested=false 2024-11-23T13:21:35,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2538): Flush status journal for 329ab862a28643091a2def94193b04dc: 2024-11-23T13:21:35,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:35,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=46 2024-11-23T13:21:35,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4106): Remote procedure done, pid=46 2024-11-23T13:21:35,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,651 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=46, resume processing ppid=45 2024-11-23T13:21:35,651 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, ppid=45, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5060 sec 2024-11-23T13:21:35,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,654 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees in 2.5110 sec 2024-11-23T13:21:35,654 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,659 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,663 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,668 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,673 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,677 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,681 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,685 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,690 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 329ab862a28643091a2def94193b04dc 2024-11-23T13:21:35,698 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 329ab862a28643091a2def94193b04dc 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-23T13:21:35,700 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=A 2024-11-23T13:21:35,700 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:35,700 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=B 2024-11-23T13:21:35,700 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:35,700 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=C 2024-11-23T13:21:35,700 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:35,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,709 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,711 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123b13fbb45273f4ab39585be886360f18c_329ab862a28643091a2def94193b04dc is 50, key is test_row_0/A:col10/1732368095625/Put/seqid=0 2024-11-23T13:21:35,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742006_1182 (size=14594) 2024-11-23T13:21:35,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,726 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,727 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,732 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123b13fbb45273f4ab39585be886360f18c_329ab862a28643091a2def94193b04dc to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123b13fbb45273f4ab39585be886360f18c_329ab862a28643091a2def94193b04dc 2024-11-23T13:21:35,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,733 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/38db820cfb8347ef8c2e7a43a5ba2cbc, store: [table=TestAcidGuarantees family=A region=329ab862a28643091a2def94193b04dc] 2024-11-23T13:21:35,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,734 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/38db820cfb8347ef8c2e7a43a5ba2cbc is 175, key is test_row_0/A:col10/1732368095625/Put/seqid=0 2024-11-23T13:21:35,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,736 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,741 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,746 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,754 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742007_1183 (size=39545) 2024-11-23T13:21:35,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,756 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=94, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/38db820cfb8347ef8c2e7a43a5ba2cbc 2024-11-23T13:21:35,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,761 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,766 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/f1986abb333b4bf5a2cadaa81ec56781 is 50, key is test_row_0/B:col10/1732368095625/Put/seqid=0 2024-11-23T13:21:35,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:35,786 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:35,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742008_1184 (size=9657) 2024-11-23T13:21:35,786 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:35,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37494 deadline: 1732368155779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:35,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368155780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:35,787 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:35,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37498 deadline: 1732368155784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:35,787 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/f1986abb333b4bf5a2cadaa81ec56781 2024-11-23T13:21:35,787 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:35,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368155785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:35,791 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:35,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368155786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:35,799 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/b514bb39a6d84642babf5cde55c8b3f7 is 50, key is test_row_0/C:col10/1732368095625/Put/seqid=0 2024-11-23T13:21:35,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742009_1185 (size=9657) 2024-11-23T13:21:35,820 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/b514bb39a6d84642babf5cde55c8b3f7 2024-11-23T13:21:35,827 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/38db820cfb8347ef8c2e7a43a5ba2cbc as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/38db820cfb8347ef8c2e7a43a5ba2cbc 2024-11-23T13:21:35,834 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/38db820cfb8347ef8c2e7a43a5ba2cbc, entries=200, sequenceid=94, filesize=38.6 K 2024-11-23T13:21:35,835 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/f1986abb333b4bf5a2cadaa81ec56781 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/f1986abb333b4bf5a2cadaa81ec56781 2024-11-23T13:21:35,842 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/f1986abb333b4bf5a2cadaa81ec56781, entries=100, sequenceid=94, filesize=9.4 K 2024-11-23T13:21:35,843 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/b514bb39a6d84642babf5cde55c8b3f7 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/b514bb39a6d84642babf5cde55c8b3f7 2024-11-23T13:21:35,849 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/b514bb39a6d84642babf5cde55c8b3f7, entries=100, sequenceid=94, filesize=9.4 K 2024-11-23T13:21:35,851 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 329ab862a28643091a2def94193b04dc in 153ms, sequenceid=94, compaction requested=true 2024-11-23T13:21:35,851 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 329ab862a28643091a2def94193b04dc: 2024-11-23T13:21:35,851 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 329ab862a28643091a2def94193b04dc:A, priority=-2147483648, current under compaction store size is 3 2024-11-23T13:21:35,852 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:21:35,852 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 329ab862a28643091a2def94193b04dc:B, priority=-2147483648, current under compaction store size is 3 2024-11-23T13:21:35,852 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-23T13:21:35,852 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 329ab862a28643091a2def94193b04dc:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T13:21:35,852 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-23T13:21:35,867 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/8a8a6e14036e4d3e823a0be098350be6 as 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/8a8a6e14036e4d3e823a0be098350be6 2024-11-23T13:21:35,875 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 329ab862a28643091a2def94193b04dc/B of 329ab862a28643091a2def94193b04dc into 8a8a6e14036e4d3e823a0be098350be6(size=11.8 K), total size for store is 33.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:21:35,875 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 329ab862a28643091a2def94193b04dc: 2024-11-23T13:21:35,875 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc., storeName=329ab862a28643091a2def94193b04dc/B, priority=13, startTime=1732368095431; duration=0sec 2024-11-23T13:21:35,875 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-23T13:21:35,875 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 329ab862a28643091a2def94193b04dc:B 2024-11-23T13:21:35,875 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 329ab862a28643091a2def94193b04dc:B 2024-11-23T13:21:35,875 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 3 compacting, 2 eligible, 16 blocking 2024-11-23T13:21:35,876 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-23T13:21:35,876 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-23T13:21:35,876 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 
because compaction request was cancelled 2024-11-23T13:21:35,876 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 329ab862a28643091a2def94193b04dc:C 2024-11-23T13:21:35,877 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:21:35,878 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33762 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:21:35,878 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 329ab862a28643091a2def94193b04dc/B is initiating minor compaction (all files) 2024-11-23T13:21:35,879 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 329ab862a28643091a2def94193b04dc/B in TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:35,879 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/8a8a6e14036e4d3e823a0be098350be6, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/38136ff66cc8455784e3d63afc0f01a4, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/f1986abb333b4bf5a2cadaa81ec56781] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp, totalSize=33.0 K 2024-11-23T13:21:35,880 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 8a8a6e14036e4d3e823a0be098350be6, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1732368093835 2024-11-23T13:21:35,881 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 38136ff66cc8455784e3d63afc0f01a4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732368094471 2024-11-23T13:21:35,881 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting f1986abb333b4bf5a2cadaa81ec56781, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732368095625 2024-11-23T13:21:35,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 329ab862a28643091a2def94193b04dc 2024-11-23T13:21:35,892 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 329ab862a28643091a2def94193b04dc 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-23T13:21:35,894 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=A 2024-11-23T13:21:35,894 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:35,894 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
329ab862a28643091a2def94193b04dc, store=B 2024-11-23T13:21:35,895 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:35,895 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=C 2024-11-23T13:21:35,895 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:35,895 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 329ab862a28643091a2def94193b04dc#B#compaction#162 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:35,896 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/56cfddb2761047a9bda14062d78ebd31 is 50, key is test_row_0/B:col10/1732368095625/Put/seqid=0 2024-11-23T13:21:35,908 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:35,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37498 deadline: 1732368155902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:35,908 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:35,908 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:35,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368155903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:35,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368155904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:35,912 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:35,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37494 deadline: 1732368155908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:35,912 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:35,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368155908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:35,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742010_1186 (size=12207) 2024-11-23T13:21:35,931 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123f682c971c47b4f29844c1d0d72fe7703_329ab862a28643091a2def94193b04dc is 50, key is test_row_0/A:col10/1732368095784/Put/seqid=0 2024-11-23T13:21:35,932 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/56cfddb2761047a9bda14062d78ebd31 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/56cfddb2761047a9bda14062d78ebd31 2024-11-23T13:21:35,940 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 329ab862a28643091a2def94193b04dc/B of 329ab862a28643091a2def94193b04dc into 56cfddb2761047a9bda14062d78ebd31(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T13:21:35,940 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 329ab862a28643091a2def94193b04dc: 2024-11-23T13:21:35,940 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc., storeName=329ab862a28643091a2def94193b04dc/B, priority=13, startTime=1732368095852; duration=0sec 2024-11-23T13:21:35,940 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:21:35,940 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 329ab862a28643091a2def94193b04dc:B 2024-11-23T13:21:35,940 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:21:35,941 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101558 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:21:35,941 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 329ab862a28643091a2def94193b04dc/A is initiating minor compaction (all files) 2024-11-23T13:21:35,942 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 329ab862a28643091a2def94193b04dc/A in TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:35,942 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/2841fd4a0924464488a024de3be045c5, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/48e8745c33ab4a0e81e0c3f9bdc7b386, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/38db820cfb8347ef8c2e7a43a5ba2cbc] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp, totalSize=99.2 K 2024-11-23T13:21:35,942 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:35,942 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 
files: [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/2841fd4a0924464488a024de3be045c5, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/48e8745c33ab4a0e81e0c3f9bdc7b386, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/38db820cfb8347ef8c2e7a43a5ba2cbc] 2024-11-23T13:21:35,943 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 2841fd4a0924464488a024de3be045c5, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1732368093835 2024-11-23T13:21:35,944 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 48e8745c33ab4a0e81e0c3f9bdc7b386, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732368094471 2024-11-23T13:21:35,944 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 38db820cfb8347ef8c2e7a43a5ba2cbc, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732368095625 2024-11-23T13:21:35,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742011_1187 (size=14594) 2024-11-23T13:21:35,960 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=329ab862a28643091a2def94193b04dc] 2024-11-23T13:21:35,964 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241123a68e8e5ea349452cad7abc4b9dfb2a4f_329ab862a28643091a2def94193b04dc store=[table=TestAcidGuarantees family=A region=329ab862a28643091a2def94193b04dc] 2024-11-23T13:21:35,967 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241123a68e8e5ea349452cad7abc4b9dfb2a4f_329ab862a28643091a2def94193b04dc, store=[table=TestAcidGuarantees family=A region=329ab862a28643091a2def94193b04dc] 2024-11-23T13:21:35,967 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123a68e8e5ea349452cad7abc4b9dfb2a4f_329ab862a28643091a2def94193b04dc because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=329ab862a28643091a2def94193b04dc] 2024-11-23T13:21:35,979 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/8fcb50e3fe4e48d2bf3c1c363bb8fa32 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/8fcb50e3fe4e48d2bf3c1c363bb8fa32 2024-11-23T13:21:35,984 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] 
regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 329ab862a28643091a2def94193b04dc/C of 329ab862a28643091a2def94193b04dc into 8fcb50e3fe4e48d2bf3c1c363bb8fa32(size=11.8 K), total size for store is 33.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:21:35,984 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 329ab862a28643091a2def94193b04dc: 2024-11-23T13:21:35,985 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc., storeName=329ab862a28643091a2def94193b04dc/C, priority=13, startTime=1732368095431; duration=0sec 2024-11-23T13:21:35,985 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:35,985 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 329ab862a28643091a2def94193b04dc:C 2024-11-23T13:21:35,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742012_1188 (size=4469) 2024-11-23T13:21:35,999 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 329ab862a28643091a2def94193b04dc#A#compaction#164 average throughput is 0.63 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:36,000 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/a076e5e45972414b865ee51f27682fd4 is 175, key is test_row_0/A:col10/1732368095625/Put/seqid=0 2024-11-23T13:21:36,013 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:36,013 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:36,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37498 deadline: 1732368156010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:36,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368156010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:36,013 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:36,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368156011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:36,015 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:36,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368156015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:36,016 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:36,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37494 deadline: 1732368156015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:36,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742013_1189 (size=31268) 2024-11-23T13:21:36,041 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/a076e5e45972414b865ee51f27682fd4 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/a076e5e45972414b865ee51f27682fd4 2024-11-23T13:21:36,049 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 329ab862a28643091a2def94193b04dc/A of 329ab862a28643091a2def94193b04dc into a076e5e45972414b865ee51f27682fd4(size=30.5 K), total size for store is 30.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T13:21:36,049 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 329ab862a28643091a2def94193b04dc: 2024-11-23T13:21:36,049 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc., storeName=329ab862a28643091a2def94193b04dc/A, priority=13, startTime=1732368095851; duration=0sec 2024-11-23T13:21:36,049 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:36,049 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 329ab862a28643091a2def94193b04dc:A 2024-11-23T13:21:36,216 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:36,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37498 deadline: 1732368156215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:36,216 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:36,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368156215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:36,218 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:36,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368156217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:36,218 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:36,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368156218, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:36,219 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:36,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37494 deadline: 1732368156218, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:36,359 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:36,365 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123f682c971c47b4f29844c1d0d72fe7703_329ab862a28643091a2def94193b04dc to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123f682c971c47b4f29844c1d0d72fe7703_329ab862a28643091a2def94193b04dc 2024-11-23T13:21:36,367 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/e52944402fab4a49b330eb99d44d9266, store: [table=TestAcidGuarantees family=A region=329ab862a28643091a2def94193b04dc] 2024-11-23T13:21:36,367 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/e52944402fab4a49b330eb99d44d9266 is 175, key is test_row_0/A:col10/1732368095784/Put/seqid=0 2024-11-23T13:21:36,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742014_1190 (size=39549) 2024-11-23T13:21:36,377 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=119, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/e52944402fab4a49b330eb99d44d9266 2024-11-23T13:21:36,390 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/95ebef089cc44a88b01a88d5bf79ee44 is 50, key is test_row_0/B:col10/1732368095784/Put/seqid=0 2024-11-23T13:21:36,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742015_1191 
(size=12001) 2024-11-23T13:21:36,408 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/95ebef089cc44a88b01a88d5bf79ee44 2024-11-23T13:21:36,423 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/7bec4db5054747dea0b47a1ee850b442 is 50, key is test_row_0/C:col10/1732368095784/Put/seqid=0 2024-11-23T13:21:36,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742016_1192 (size=12001) 2024-11-23T13:21:36,436 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/7bec4db5054747dea0b47a1ee850b442 2024-11-23T13:21:36,443 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/e52944402fab4a49b330eb99d44d9266 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/e52944402fab4a49b330eb99d44d9266 2024-11-23T13:21:36,451 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/e52944402fab4a49b330eb99d44d9266, entries=200, sequenceid=119, filesize=38.6 K 2024-11-23T13:21:36,453 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/95ebef089cc44a88b01a88d5bf79ee44 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/95ebef089cc44a88b01a88d5bf79ee44 2024-11-23T13:21:36,461 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/95ebef089cc44a88b01a88d5bf79ee44, entries=150, sequenceid=119, filesize=11.7 K 2024-11-23T13:21:36,463 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/7bec4db5054747dea0b47a1ee850b442 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/7bec4db5054747dea0b47a1ee850b442 2024-11-23T13:21:36,472 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/7bec4db5054747dea0b47a1ee850b442, entries=150, sequenceid=119, filesize=11.7 K 2024-11-23T13:21:36,475 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(1000): StoreScanner already closing. There is no need to updateReaders 2024-11-23T13:21:36,477 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 329ab862a28643091a2def94193b04dc in 585ms, sequenceid=119, compaction requested=true 2024-11-23T13:21:36,477 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 329ab862a28643091a2def94193b04dc: 2024-11-23T13:21:36,479 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-23T13:21:36,479 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 329ab862a28643091a2def94193b04dc:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T13:21:36,479 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:36,480 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-23T13:21:36,480 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-23T13:21:36,480 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-23T13:21:36,480 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 329ab862a28643091a2def94193b04dc:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T13:21:36,480 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:36,480 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 
because compaction request was cancelled 2024-11-23T13:21:36,480 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 329ab862a28643091a2def94193b04dc:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T13:21:36,480 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 329ab862a28643091a2def94193b04dc:A 2024-11-23T13:21:36,480 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:21:36,480 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T13:21:36,481 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-23T13:21:36,481 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-23T13:21:36,481 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. because compaction request was cancelled 2024-11-23T13:21:36,481 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 329ab862a28643091a2def94193b04dc:B 2024-11-23T13:21:36,482 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 45763 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T13:21:36,482 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): 329ab862a28643091a2def94193b04dc/C is initiating minor compaction (all files) 2024-11-23T13:21:36,482 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 329ab862a28643091a2def94193b04dc/C in TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 
2024-11-23T13:21:36,482 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/8fcb50e3fe4e48d2bf3c1c363bb8fa32, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/072aae490f6f4142a8a08943fcfe7843, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/b514bb39a6d84642babf5cde55c8b3f7, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/7bec4db5054747dea0b47a1ee850b442] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp, totalSize=44.7 K 2024-11-23T13:21:36,483 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8fcb50e3fe4e48d2bf3c1c363bb8fa32, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1732368093835 2024-11-23T13:21:36,483 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 072aae490f6f4142a8a08943fcfe7843, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732368094471 2024-11-23T13:21:36,484 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting b514bb39a6d84642babf5cde55c8b3f7, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732368095625 2024-11-23T13:21:36,484 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7bec4db5054747dea0b47a1ee850b442, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1732368095784 2024-11-23T13:21:36,495 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 329ab862a28643091a2def94193b04dc#C#compaction#167 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:36,496 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/676ec2d1d58f4ecc8bfc4a45a2e29f5f is 50, key is test_row_0/C:col10/1732368095784/Put/seqid=0 2024-11-23T13:21:36,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742017_1193 (size=12241) 2024-11-23T13:21:36,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 329ab862a28643091a2def94193b04dc 2024-11-23T13:21:36,524 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 329ab862a28643091a2def94193b04dc 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-23T13:21:36,525 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=A 2024-11-23T13:21:36,525 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:36,525 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=B 2024-11-23T13:21:36,525 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:36,525 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=C 2024-11-23T13:21:36,525 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:36,534 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123147440e7b3f64301a160043e53cad339_329ab862a28643091a2def94193b04dc is 50, key is test_row_0/A:col10/1732368096522/Put/seqid=0 2024-11-23T13:21:36,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742018_1194 (size=17284) 2024-11-23T13:21:36,549 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:36,555 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123147440e7b3f64301a160043e53cad339_329ab862a28643091a2def94193b04dc to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123147440e7b3f64301a160043e53cad339_329ab862a28643091a2def94193b04dc 2024-11-23T13:21:36,555 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:36,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368156547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:36,555 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:36,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37494 deadline: 1732368156548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:36,556 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:36,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37498 deadline: 1732368156549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:36,556 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:36,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368156551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:36,556 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:36,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368156555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:36,557 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/24a6d7758ea741628d2f627cc6961319, store: [table=TestAcidGuarantees family=A region=329ab862a28643091a2def94193b04dc] 2024-11-23T13:21:36,557 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/24a6d7758ea741628d2f627cc6961319 is 175, key is test_row_0/A:col10/1732368096522/Put/seqid=0 2024-11-23T13:21:36,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742019_1195 (size=48389) 2024-11-23T13:21:36,569 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=138, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/24a6d7758ea741628d2f627cc6961319 2024-11-23T13:21:36,581 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/dfa527b572db4195b5ea59230673a7fc is 50, key is test_row_0/B:col10/1732368096522/Put/seqid=0 2024-11-23T13:21:36,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742020_1196 (size=12151) 2024-11-23T13:21:36,590 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=138 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/dfa527b572db4195b5ea59230673a7fc 2024-11-23T13:21:36,599 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/cf7257838e7c47c898ee7e7944ba9479 is 50, key is test_row_0/C:col10/1732368096522/Put/seqid=0 2024-11-23T13:21:36,624 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742021_1197 (size=12151) 2024-11-23T13:21:36,658 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:36,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368156657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:36,658 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:36,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37494 deadline: 1732368156657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:36,659 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:36,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37498 deadline: 1732368156657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:36,659 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:36,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368156657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:36,659 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:36,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368156658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:36,864 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:36,864 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:36,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37494 deadline: 1732368156860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:36,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37498 deadline: 1732368156860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:36,864 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:36,865 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:36,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368156860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:36,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368156860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:36,865 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:36,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368156861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:36,921 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/676ec2d1d58f4ecc8bfc4a45a2e29f5f as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/676ec2d1d58f4ecc8bfc4a45a2e29f5f 2024-11-23T13:21:36,929 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 329ab862a28643091a2def94193b04dc/C of 329ab862a28643091a2def94193b04dc into 676ec2d1d58f4ecc8bfc4a45a2e29f5f(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T13:21:36,929 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 329ab862a28643091a2def94193b04dc: 2024-11-23T13:21:36,929 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc., storeName=329ab862a28643091a2def94193b04dc/C, priority=12, startTime=1732368096480; duration=0sec 2024-11-23T13:21:36,929 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:36,929 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 329ab862a28643091a2def94193b04dc:C 2024-11-23T13:21:37,026 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=138 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/cf7257838e7c47c898ee7e7944ba9479 2024-11-23T13:21:37,032 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/24a6d7758ea741628d2f627cc6961319 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/24a6d7758ea741628d2f627cc6961319 2024-11-23T13:21:37,038 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/24a6d7758ea741628d2f627cc6961319, entries=250, sequenceid=138, filesize=47.3 K 2024-11-23T13:21:37,040 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/dfa527b572db4195b5ea59230673a7fc as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/dfa527b572db4195b5ea59230673a7fc 2024-11-23T13:21:37,046 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/dfa527b572db4195b5ea59230673a7fc, entries=150, sequenceid=138, filesize=11.9 K 2024-11-23T13:21:37,047 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/cf7257838e7c47c898ee7e7944ba9479 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/cf7257838e7c47c898ee7e7944ba9479 2024-11-23T13:21:37,052 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/cf7257838e7c47c898ee7e7944ba9479, entries=150, sequenceid=138, filesize=11.9 K 2024-11-23T13:21:37,053 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 329ab862a28643091a2def94193b04dc in 530ms, sequenceid=138, compaction requested=true 2024-11-23T13:21:37,053 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 329ab862a28643091a2def94193b04dc: 2024-11-23T13:21:37,054 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 329ab862a28643091a2def94193b04dc:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T13:21:37,054 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:37,054 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:21:37,054 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 329ab862a28643091a2def94193b04dc:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T13:21:37,054 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:37,054 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 329ab862a28643091a2def94193b04dc:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T13:21:37,054 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:21:37,054 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:21:37,055 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 119206 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:21:37,055 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 329ab862a28643091a2def94193b04dc/A is initiating minor compaction (all files) 2024-11-23T13:21:37,055 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 329ab862a28643091a2def94193b04dc/A in TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 
2024-11-23T13:21:37,055 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/a076e5e45972414b865ee51f27682fd4, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/e52944402fab4a49b330eb99d44d9266, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/24a6d7758ea741628d2f627cc6961319] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp, totalSize=116.4 K 2024-11-23T13:21:37,056 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:37,056 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. files: [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/a076e5e45972414b865ee51f27682fd4, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/e52944402fab4a49b330eb99d44d9266, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/24a6d7758ea741628d2f627cc6961319] 2024-11-23T13:21:37,056 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:21:37,056 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): 329ab862a28643091a2def94193b04dc/B is initiating minor compaction (all files) 2024-11-23T13:21:37,056 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 329ab862a28643091a2def94193b04dc/B in TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 
2024-11-23T13:21:37,056 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/56cfddb2761047a9bda14062d78ebd31, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/95ebef089cc44a88b01a88d5bf79ee44, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/dfa527b572db4195b5ea59230673a7fc] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp, totalSize=35.5 K 2024-11-23T13:21:37,057 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting a076e5e45972414b865ee51f27682fd4, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732368094498 2024-11-23T13:21:37,057 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 56cfddb2761047a9bda14062d78ebd31, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1732368094498 2024-11-23T13:21:37,057 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting e52944402fab4a49b330eb99d44d9266, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1732368095779 2024-11-23T13:21:37,057 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 95ebef089cc44a88b01a88d5bf79ee44, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1732368095784 2024-11-23T13:21:37,058 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 24a6d7758ea741628d2f627cc6961319, keycount=250, bloomtype=ROW, size=47.3 K, encoding=NONE, compression=NONE, seqNum=138, earliestPutTs=1732368095906 2024-11-23T13:21:37,058 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting dfa527b572db4195b5ea59230673a7fc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=138, earliestPutTs=1732368096520 2024-11-23T13:21:37,075 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 329ab862a28643091a2def94193b04dc#B#compaction#171 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:37,075 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/18f482ecdc934ef68829eb705b5ce56d is 50, key is test_row_0/B:col10/1732368096522/Put/seqid=0 2024-11-23T13:21:37,085 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=329ab862a28643091a2def94193b04dc] 2024-11-23T13:21:37,090 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241123d5b695bc66794afdaf765860dc413a77_329ab862a28643091a2def94193b04dc store=[table=TestAcidGuarantees family=A region=329ab862a28643091a2def94193b04dc] 2024-11-23T13:21:37,093 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241123d5b695bc66794afdaf765860dc413a77_329ab862a28643091a2def94193b04dc, store=[table=TestAcidGuarantees family=A region=329ab862a28643091a2def94193b04dc] 2024-11-23T13:21:37,093 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123d5b695bc66794afdaf765860dc413a77_329ab862a28643091a2def94193b04dc because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=329ab862a28643091a2def94193b04dc] 2024-11-23T13:21:37,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742022_1198 (size=12459) 2024-11-23T13:21:37,104 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/18f482ecdc934ef68829eb705b5ce56d as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/18f482ecdc934ef68829eb705b5ce56d 2024-11-23T13:21:37,115 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 329ab862a28643091a2def94193b04dc/B of 329ab862a28643091a2def94193b04dc into 18f482ecdc934ef68829eb705b5ce56d(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T13:21:37,115 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 329ab862a28643091a2def94193b04dc: 2024-11-23T13:21:37,115 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc., storeName=329ab862a28643091a2def94193b04dc/B, priority=13, startTime=1732368097054; duration=0sec 2024-11-23T13:21:37,115 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:21:37,115 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 329ab862a28643091a2def94193b04dc:B 2024-11-23T13:21:37,115 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-23T13:21:37,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742023_1199 (size=4469) 2024-11-23T13:21:37,118 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-23T13:21:37,118 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-23T13:21:37,118 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. because compaction request was cancelled 2024-11-23T13:21:37,118 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 329ab862a28643091a2def94193b04dc:C 2024-11-23T13:21:37,119 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 329ab862a28643091a2def94193b04dc#A#compaction#172 average throughput is 0.72 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:37,119 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/d03f117e44974c54b61fa24844227889 is 175, key is test_row_0/A:col10/1732368096522/Put/seqid=0 2024-11-23T13:21:37,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742024_1200 (size=31413) 2024-11-23T13:21:37,145 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/d03f117e44974c54b61fa24844227889 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/d03f117e44974c54b61fa24844227889 2024-11-23T13:21:37,169 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 329ab862a28643091a2def94193b04dc/A of 329ab862a28643091a2def94193b04dc into d03f117e44974c54b61fa24844227889(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:21:37,169 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 329ab862a28643091a2def94193b04dc: 2024-11-23T13:21:37,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 329ab862a28643091a2def94193b04dc 2024-11-23T13:21:37,169 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc., storeName=329ab862a28643091a2def94193b04dc/A, priority=13, startTime=1732368097054; duration=0sec 2024-11-23T13:21:37,169 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 329ab862a28643091a2def94193b04dc 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-23T13:21:37,169 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:37,169 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 329ab862a28643091a2def94193b04dc:A 2024-11-23T13:21:37,169 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=A 2024-11-23T13:21:37,169 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:37,169 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=B 2024-11-23T13:21:37,170 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:37,170 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=C 2024-11-23T13:21:37,170 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:37,186 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:37,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368157181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:37,186 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:37,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368157182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:37,186 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:37,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368157185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:37,210 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:37,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37494 deadline: 1732368157186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:37,210 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:37,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37498 deadline: 1732368157186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:37,222 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112398f24e24f93f4377b60db95a78fe9d93_329ab862a28643091a2def94193b04dc is 50, key is test_row_0/A:col10/1732368097166/Put/seqid=0 2024-11-23T13:21:37,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742025_1201 (size=12304) 2024-11-23T13:21:37,233 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:37,241 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112398f24e24f93f4377b60db95a78fe9d93_329ab862a28643091a2def94193b04dc to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112398f24e24f93f4377b60db95a78fe9d93_329ab862a28643091a2def94193b04dc 2024-11-23T13:21:37,243 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/1fed186fc19d4be19fbd9f87ecfddfbd, store: [table=TestAcidGuarantees family=A region=329ab862a28643091a2def94193b04dc] 2024-11-23T13:21:37,244 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/1fed186fc19d4be19fbd9f87ecfddfbd is 175, key is test_row_0/A:col10/1732368097166/Put/seqid=0 2024-11-23T13:21:37,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-23T13:21:37,249 INFO [Thread-777 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 45 completed 2024-11-23T13:21:37,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742026_1202 (size=31105) 
2024-11-23T13:21:37,251 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=162, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/1fed186fc19d4be19fbd9f87ecfddfbd 2024-11-23T13:21:37,253 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T13:21:37,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees 2024-11-23T13:21:37,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-23T13:21:37,256 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T13:21:37,257 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T13:21:37,257 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=48, ppid=47, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T13:21:37,268 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/dc69ad943e9c405e93943aa7ebc47a40 is 50, key is test_row_0/B:col10/1732368097166/Put/seqid=0 2024-11-23T13:21:37,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742027_1203 (size=12151) 2024-11-23T13:21:37,282 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=162 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/dc69ad943e9c405e93943aa7ebc47a40 2024-11-23T13:21:37,288 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:37,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368157288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:37,288 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:37,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368157288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:37,289 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:37,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368157289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:37,292 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/fdb5deea460748c59cec6e36916e3539 is 50, key is test_row_0/C:col10/1732368097166/Put/seqid=0 2024-11-23T13:21:37,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742028_1204 (size=12151) 2024-11-23T13:21:37,312 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:37,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37498 deadline: 1732368157312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:37,313 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:37,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37494 deadline: 1732368157312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:37,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-23T13:21:37,409 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:37,409 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-23T13:21:37,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:37,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. as already flushing 2024-11-23T13:21:37,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:37,410 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:21:37,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:37,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:37,491 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:37,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368157490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:37,491 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:37,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368157490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:37,492 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:37,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368157491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:37,514 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:37,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37498 deadline: 1732368157513, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:37,516 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:37,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37494 deadline: 1732368157515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:37,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-23T13:21:37,562 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:37,562 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-23T13:21:37,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:37,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. as already flushing 2024-11-23T13:21:37,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:37,563 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:21:37,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:37,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:37,706 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=162 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/fdb5deea460748c59cec6e36916e3539 2024-11-23T13:21:37,712 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/1fed186fc19d4be19fbd9f87ecfddfbd as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/1fed186fc19d4be19fbd9f87ecfddfbd 2024-11-23T13:21:37,715 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:37,716 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-23T13:21:37,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:37,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. as already flushing 2024-11-23T13:21:37,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:37,716 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/1fed186fc19d4be19fbd9f87ecfddfbd, entries=150, sequenceid=162, filesize=30.4 K 2024-11-23T13:21:37,716 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:37,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:37,718 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/dc69ad943e9c405e93943aa7ebc47a40 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/dc69ad943e9c405e93943aa7ebc47a40 2024-11-23T13:21:37,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:37,723 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/dc69ad943e9c405e93943aa7ebc47a40, entries=150, sequenceid=162, filesize=11.9 K 2024-11-23T13:21:37,724 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/fdb5deea460748c59cec6e36916e3539 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/fdb5deea460748c59cec6e36916e3539 2024-11-23T13:21:37,729 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/fdb5deea460748c59cec6e36916e3539, entries=150, sequenceid=162, filesize=11.9 K 2024-11-23T13:21:37,730 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 329ab862a28643091a2def94193b04dc in 561ms, sequenceid=162, compaction requested=true 2024-11-23T13:21:37,731 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 329ab862a28643091a2def94193b04dc: 2024-11-23T13:21:37,731 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-23T13:21:37,731 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 329ab862a28643091a2def94193b04dc:A, priority=-2147483648, current under compaction store 
size is 1 2024-11-23T13:21:37,731 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:37,731 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 329ab862a28643091a2def94193b04dc:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T13:21:37,731 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:21:37,731 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 329ab862a28643091a2def94193b04dc:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T13:21:37,731 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-23T13:21:37,731 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:21:37,733 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-23T13:21:37,733 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-23T13:21:37,733 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. because compaction request was cancelled 2024-11-23T13:21:37,733 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 329ab862a28643091a2def94193b04dc:A 2024-11-23T13:21:37,733 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-23T13:21:37,734 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36543 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:21:37,734 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 329ab862a28643091a2def94193b04dc/C is initiating minor compaction (all files) 2024-11-23T13:21:37,734 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 329ab862a28643091a2def94193b04dc/C in TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:37,734 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-23T13:21:37,734 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 
2024-11-23T13:21:37,734 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. because compaction request was cancelled 2024-11-23T13:21:37,734 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 329ab862a28643091a2def94193b04dc:B 2024-11-23T13:21:37,734 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/676ec2d1d58f4ecc8bfc4a45a2e29f5f, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/cf7257838e7c47c898ee7e7944ba9479, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/fdb5deea460748c59cec6e36916e3539] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp, totalSize=35.7 K 2024-11-23T13:21:37,735 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 676ec2d1d58f4ecc8bfc4a45a2e29f5f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1732368095784 2024-11-23T13:21:37,735 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting cf7257838e7c47c898ee7e7944ba9479, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=138, earliestPutTs=1732368096520 2024-11-23T13:21:37,735 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting fdb5deea460748c59cec6e36916e3539, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=162, earliestPutTs=1732368096547 2024-11-23T13:21:37,744 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 329ab862a28643091a2def94193b04dc#C#compaction#176 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:37,745 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/849fd21c65aa4578a9d2544c6f9effb5 is 50, key is test_row_0/C:col10/1732368097166/Put/seqid=0 2024-11-23T13:21:37,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742029_1205 (size=12493) 2024-11-23T13:21:37,764 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/849fd21c65aa4578a9d2544c6f9effb5 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/849fd21c65aa4578a9d2544c6f9effb5 2024-11-23T13:21:37,772 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 329ab862a28643091a2def94193b04dc/C of 329ab862a28643091a2def94193b04dc into 849fd21c65aa4578a9d2544c6f9effb5(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:21:37,772 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 329ab862a28643091a2def94193b04dc: 2024-11-23T13:21:37,772 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc., storeName=329ab862a28643091a2def94193b04dc/C, priority=13, startTime=1732368097731; duration=0sec 2024-11-23T13:21:37,772 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:37,772 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 329ab862a28643091a2def94193b04dc:C 2024-11-23T13:21:37,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 329ab862a28643091a2def94193b04dc 2024-11-23T13:21:37,795 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 329ab862a28643091a2def94193b04dc 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-23T13:21:37,796 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=A 2024-11-23T13:21:37,797 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:37,797 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=B 2024-11-23T13:21:37,797 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:37,797 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=C 2024-11-23T13:21:37,797 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): 
Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:37,805 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112391b5a2527ce94054915ca2db64c5a526_329ab862a28643091a2def94193b04dc is 50, key is test_row_0/A:col10/1732368097795/Put/seqid=0 2024-11-23T13:21:37,821 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:37,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37494 deadline: 1732368157818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:37,822 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:37,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37498 deadline: 1732368157818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:37,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742030_1206 (size=19774) 2024-11-23T13:21:37,825 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:37,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368157820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:37,825 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:37,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368157821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:37,825 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:37,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368157822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:37,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-23T13:21:37,869 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:37,870 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-23T13:21:37,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:37,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. as already flushing 2024-11-23T13:21:37,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:37,870 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:21:37,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:37,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:37,928 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:37,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368157927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:37,929 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:37,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368157927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:37,929 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:37,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368157927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:38,023 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:38,023 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-23T13:21:38,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 
2024-11-23T13:21:38,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. as already flushing 2024-11-23T13:21:38,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:38,024 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:38,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:21:38,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:38,134 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:38,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368158131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:38,134 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:38,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368158131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:38,134 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:38,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368158132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:38,176 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:38,177 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-23T13:21:38,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:38,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. as already flushing 2024-11-23T13:21:38,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:38,177 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:38,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:38,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:38,224 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:38,229 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112391b5a2527ce94054915ca2db64c5a526_329ab862a28643091a2def94193b04dc to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112391b5a2527ce94054915ca2db64c5a526_329ab862a28643091a2def94193b04dc 2024-11-23T13:21:38,230 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/cd0958482dbd47d4b68a2c9fac6ff415, store: [table=TestAcidGuarantees family=A region=329ab862a28643091a2def94193b04dc] 2024-11-23T13:21:38,231 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/cd0958482dbd47d4b68a2c9fac6ff415 is 175, key is test_row_0/A:col10/1732368097795/Put/seqid=0 2024-11-23T13:21:38,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742031_1207 (size=57033) 2024-11-23T13:21:38,328 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:38,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37498 deadline: 1732368158325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:38,328 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:38,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37494 deadline: 1732368158327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:38,329 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:38,330 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-23T13:21:38,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 
2024-11-23T13:21:38,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. as already flushing 2024-11-23T13:21:38,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:38,330 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:38,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
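Every RegionTooBusyException trace in this log passes through HRegion.checkResources, which rejects writes once the region's memstore exceeds its blocking size (flush size times a block multiplier). The snippet below is a simplified, hypothetical rendering of that guard, not the actual org.apache.hadoop.hbase.regionserver.HRegion code; the two configuration keys are the real HBase settings that determine the 512.0 K limit seen here, but how this test tunes them is not shown in the excerpt.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.RegionTooBusyException;

// Simplified illustration of the blocking-memstore guard that produces
// "RegionTooBusyException: Over memstore limit=..." in the log above.
// This is NOT the real HRegion implementation.
class MemstoreGuardSketch {
  private final long blockingMemStoreSize;
  private volatile long memStoreSizeBytes;   // updated as mutations are applied (not shown)

  MemstoreGuardSketch(Configuration conf) {
    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4);
    this.blockingMemStoreSize = flushSize * multiplier;
  }

  void checkResources(String regionName) throws RegionTooBusyException {
    if (memStoreSizeBytes > blockingMemStoreSize) {
      // In the real server a flush is also requested here; the write is rejected
      // so the client backs off until the memstore drains.
      throw new RegionTooBusyException("Over memstore limit=" + blockingMemStoreSize
          + ", regionName=" + regionName);
    }
  }
}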
2024-11-23T13:21:38,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:38,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-23T13:21:38,435 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:38,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368158435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:38,438 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:38,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368158437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:38,438 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:38,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368158438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:38,482 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:38,483 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-23T13:21:38,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:38,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. as already flushing 2024-11-23T13:21:38,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:38,483 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:38,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:38,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:38,635 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:38,636 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=181, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/cd0958482dbd47d4b68a2c9fac6ff415 2024-11-23T13:21:38,636 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-23T13:21:38,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:38,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. as already flushing 2024-11-23T13:21:38,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:38,636 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:21:38,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:38,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:38,649 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/325ec228506245cb9044ebec47fb0f21 is 50, key is test_row_0/B:col10/1732368097795/Put/seqid=0 2024-11-23T13:21:38,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742032_1208 (size=12151) 2024-11-23T13:21:38,795 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:38,796 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-23T13:21:38,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:38,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. as already flushing 2024-11-23T13:21:38,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:38,796 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
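The repeating pid=48 cycle above (dispatch FlushRegionCallable, find the region "already flushing", fail with "Unable to complete flush", report to the master, redispatch) is the master-driven flush procedure colliding with the MemStoreFlusher flush that is still writing out the A and B store files. A flush like this is typically requested from a client through Admin; the sketch below is a minimal, illustrative example where the synchronous call blocks until the master's flush procedure finishes, which is consistent with the repeated "Checking to see if procedure is done pid=47" polling in this log. Admin.flush(TableName) is the real API; the class name and surrounding setup are assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Blocks until the flush completes; on a procedure-based master this is
      // driven by per-region flush procedures like pid=47/48 in the log above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}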
2024-11-23T13:21:38,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:38,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:38,937 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:38,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368158937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:38,942 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:38,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368158941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:38,945 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:38,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368158944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:38,949 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:38,950 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-23T13:21:38,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 
2024-11-23T13:21:38,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. as already flushing 2024-11-23T13:21:38,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:38,950 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:38,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
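The "Over memstore limit=512.0 K" warnings in this stretch mean the region's memstore has grown past its blocking threshold, so incoming mutations are rejected with RegionTooBusyException until the in-flight flush drains it. The blocking threshold is the configured flush size multiplied by the block multiplier; the sketch below shows one way a test could arrive at a 512 K limit, with values that are assumptions chosen to match the number in the log rather than settings read from this run.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemstoreLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Blocking limit = flush size * block multiplier.
    // 128 KB * 4 = 512 KB, matching "Over memstore limit=512.0 K" above.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
  }
}
```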
2024-11-23T13:21:38,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:21:39,070 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=181 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/325ec228506245cb9044ebec47fb0f21 2024-11-23T13:21:39,078 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/77c5427e1c7c480bb53b849a014be1b2 is 50, key is test_row_0/C:col10/1732368097795/Put/seqid=0 2024-11-23T13:21:39,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742033_1209 (size=12151) 2024-11-23T13:21:39,102 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:39,103 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-23T13:21:39,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:39,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. as already flushing 2024-11-23T13:21:39,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:39,104 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
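The "Flushed memstore data size=33.54 KB at sequenceid=181" line above shows each column family's snapshot being written to a temporary HFile under .tmp/<family>/ before it is committed into the store, and the "key is test_row_0/B:col10/..." fragments identify the kind of cell being persisted. Purely as an illustration, one of those cells could be read back with a plain Get such as the following; the connection handling is an assumption.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ReadBackSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row key, family and qualifier taken from the HFile writer lines above.
      Get get = new Get(Bytes.toBytes("test_row_0"));
      get.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"));
      Result result = table.get(get);
      byte[] value = result.getValue(Bytes.toBytes("B"), Bytes.toBytes("col10"));
      System.out.println(value == null ? "no cell" : Bytes.toStringBinary(value));
    }
  }
}
```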
2024-11-23T13:21:39,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:39,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:39,256 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:39,256 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-23T13:21:39,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:39,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. as already flushing 2024-11-23T13:21:39,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:39,257 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:39,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:39,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:39,333 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:39,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37494 deadline: 1732368159331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:39,339 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:39,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37498 deadline: 1732368159336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:39,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-23T13:21:39,409 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:39,410 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-23T13:21:39,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:39,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. as already flushing 2024-11-23T13:21:39,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:39,410 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
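The RegionTooBusyException responses just above (callId 77 and 74) are the server pushing back on writers while the memstore stays over its blocking limit; the standard HBase client retries such calls internally, but a caller working close to the raw API can also back off explicitly. The helper below is a simplified sketch under assumed retry counts and sleep times, not the mechanism used by the test's own writer threads.

```java
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

public class BackoffPut {
  // Retry a put a few times when the region reports it is too busy.
  static void putWithBackoff(Table table, Put put) throws Exception {
    long sleepMs = 200;                  // assumed initial backoff
    for (int attempt = 1; ; attempt++) {
      try {
        table.put(put);
        return;
      } catch (RegionTooBusyException e) {
        // With default client settings this is normally retried inside the
        // client; seeing it here assumes client retries are dialed down.
        if (attempt >= 5) {              // assumed retry budget
          throw e;
        }
        Thread.sleep(sleepMs);
        sleepMs *= 2;                    // simple exponential backoff
      }
    }
  }
}
```

A Put for one of the rows seen in the log (for example test_row_0 in family A, qualifier col10) could be handed to this helper.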
2024-11-23T13:21:39,410 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:39,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:39,487 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=181 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/77c5427e1c7c480bb53b849a014be1b2 2024-11-23T13:21:39,494 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/cd0958482dbd47d4b68a2c9fac6ff415 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/cd0958482dbd47d4b68a2c9fac6ff415 2024-11-23T13:21:39,499 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/cd0958482dbd47d4b68a2c9fac6ff415, entries=300, sequenceid=181, filesize=55.7 K 2024-11-23T13:21:39,501 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/325ec228506245cb9044ebec47fb0f21 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/325ec228506245cb9044ebec47fb0f21 2024-11-23T13:21:39,509 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/325ec228506245cb9044ebec47fb0f21, entries=150, sequenceid=181, filesize=11.9 K 2024-11-23T13:21:39,510 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/77c5427e1c7c480bb53b849a014be1b2 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/77c5427e1c7c480bb53b849a014be1b2 2024-11-23T13:21:39,517 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/77c5427e1c7c480bb53b849a014be1b2, entries=150, sequenceid=181, filesize=11.9 K 2024-11-23T13:21:39,518 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 329ab862a28643091a2def94193b04dc in 1723ms, sequenceid=181, compaction requested=true 2024-11-23T13:21:39,518 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 329ab862a28643091a2def94193b04dc: 2024-11-23T13:21:39,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
329ab862a28643091a2def94193b04dc:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T13:21:39,519 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:21:39,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:39,519 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:21:39,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 329ab862a28643091a2def94193b04dc:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T13:21:39,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:39,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 329ab862a28643091a2def94193b04dc:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T13:21:39,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:21:39,520 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 119551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:21:39,520 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:21:39,520 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 329ab862a28643091a2def94193b04dc/B is initiating minor compaction (all files) 2024-11-23T13:21:39,520 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): 329ab862a28643091a2def94193b04dc/A is initiating minor compaction (all files) 2024-11-23T13:21:39,520 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 329ab862a28643091a2def94193b04dc/B in TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:39,520 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 329ab862a28643091a2def94193b04dc/A in TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 
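Once the flush finishes ("Finished flush of dataSize ~100.63 KB ... compaction requested=true"), each store is offered to the compaction policy: with 3 store files, 0 compacting and 16 blocking, the ExploringCompactionPolicy selects all three files for a minor compaction of A and of B. The 3 and 16 in those lines correspond to the usual minimum-files and blocking-files thresholds; the sketch below lists the relevant settings with their default values, which are assumed rather than read from this test's configuration.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThresholds {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Minimum number of eligible files before a minor compaction starts
    // ("Need 3 to initiate" later in the log).
    conf.setInt("hbase.hstore.compaction.min", 3);
    // Upper bound on the number of files compacted in one pass.
    conf.setInt("hbase.hstore.compaction.max", 10);
    // A store with this many files delays further flushes
    // ("3 eligible, 16 blocking" in the selection lines above).
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
  }
}
```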
2024-11-23T13:21:39,520 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/18f482ecdc934ef68829eb705b5ce56d, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/dc69ad943e9c405e93943aa7ebc47a40, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/325ec228506245cb9044ebec47fb0f21] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp, totalSize=35.9 K 2024-11-23T13:21:39,520 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/d03f117e44974c54b61fa24844227889, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/1fed186fc19d4be19fbd9f87ecfddfbd, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/cd0958482dbd47d4b68a2c9fac6ff415] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp, totalSize=116.7 K 2024-11-23T13:21:39,520 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:39,520 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 
files: [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/d03f117e44974c54b61fa24844227889, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/1fed186fc19d4be19fbd9f87ecfddfbd, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/cd0958482dbd47d4b68a2c9fac6ff415] 2024-11-23T13:21:39,521 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 18f482ecdc934ef68829eb705b5ce56d, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=138, earliestPutTs=1732368096520 2024-11-23T13:21:39,521 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting d03f117e44974c54b61fa24844227889, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=138, earliestPutTs=1732368096520 2024-11-23T13:21:39,521 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting dc69ad943e9c405e93943aa7ebc47a40, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=162, earliestPutTs=1732368096547 2024-11-23T13:21:39,522 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1fed186fc19d4be19fbd9f87ecfddfbd, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=162, earliestPutTs=1732368096547 2024-11-23T13:21:39,522 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting cd0958482dbd47d4b68a2c9fac6ff415, keycount=300, bloomtype=ROW, size=55.7 K, encoding=NONE, compression=NONE, seqNum=181, earliestPutTs=1732368097181 2024-11-23T13:21:39,522 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 325ec228506245cb9044ebec47fb0f21, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=181, earliestPutTs=1732368097184 2024-11-23T13:21:39,531 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=329ab862a28643091a2def94193b04dc] 2024-11-23T13:21:39,534 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 329ab862a28643091a2def94193b04dc#B#compaction#181 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:39,535 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/97282304dd2a41f081ec545c4ee66025 is 50, key is test_row_0/B:col10/1732368097795/Put/seqid=0 2024-11-23T13:21:39,540 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241123c858ce648c334ac5833e3d3ad38f0fd7_329ab862a28643091a2def94193b04dc store=[table=TestAcidGuarantees family=A region=329ab862a28643091a2def94193b04dc] 2024-11-23T13:21:39,543 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241123c858ce648c334ac5833e3d3ad38f0fd7_329ab862a28643091a2def94193b04dc, store=[table=TestAcidGuarantees family=A region=329ab862a28643091a2def94193b04dc] 2024-11-23T13:21:39,543 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123c858ce648c334ac5833e3d3ad38f0fd7_329ab862a28643091a2def94193b04dc because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=329ab862a28643091a2def94193b04dc] 2024-11-23T13:21:39,562 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:39,563 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-23T13:21:39,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 
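The DefaultMobStoreCompactor and DefaultMobStoreFlusher entries, together with the mobdir/... paths, indicate that column family A is MOB-enabled: values above the MOB threshold go to separate MOB files while the regular HFiles keep only references, and in this compaction the MOB writer is aborted because no cells crossed the threshold. A minimal sketch of declaring such a schema is below; the table and family names match the log, but the threshold value is an assumption.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilySketch {
  static void createTable(Admin admin) throws Exception {
    TableDescriptorBuilder builder =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
    // Family A keeps values larger than the threshold in MOB files under mobdir/.
    builder.setColumnFamily(ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("A"))
        .setMobEnabled(true)
        .setMobThreshold(10 * 1024)   // assumed threshold, in bytes
        .build());
    builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"));
    builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"));
    admin.createTable(builder.build());
  }
}
```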
2024-11-23T13:21:39,563 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2837): Flushing 329ab862a28643091a2def94193b04dc 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-23T13:21:39,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=A 2024-11-23T13:21:39,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:39,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=B 2024-11-23T13:21:39,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:39,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=C 2024-11-23T13:21:39,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:39,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742034_1210 (size=12561) 2024-11-23T13:21:39,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742035_1211 (size=4469) 2024-11-23T13:21:39,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123214865e159db4a5aa078ae6453e6a607_329ab862a28643091a2def94193b04dc is 50, key is test_row_0/A:col10/1732368097820/Put/seqid=0 2024-11-23T13:21:39,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742036_1212 (size=12304) 2024-11-23T13:21:39,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:39,633 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123214865e159db4a5aa078ae6453e6a607_329ab862a28643091a2def94193b04dc to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123214865e159db4a5aa078ae6453e6a607_329ab862a28643091a2def94193b04dc 2024-11-23T13:21:39,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/ba77b3abad3f44428c4eac5427ff0af4, store: [table=TestAcidGuarantees family=A region=329ab862a28643091a2def94193b04dc] 2024-11-23T13:21:39,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/ba77b3abad3f44428c4eac5427ff0af4 is 175, key is test_row_0/A:col10/1732368097820/Put/seqid=0 2024-11-23T13:21:39,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742037_1213 (size=31105) 2024-11-23T13:21:39,644 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=199, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/ba77b3abad3f44428c4eac5427ff0af4 2024-11-23T13:21:39,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/8da1a2c2c12545abaa1d4de3609bda3e is 50, key is test_row_0/B:col10/1732368097820/Put/seqid=0 2024-11-23T13:21:39,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742038_1214 (size=12151) 2024-11-23T13:21:39,666 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/8da1a2c2c12545abaa1d4de3609bda3e 2024-11-23T13:21:39,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/531227f6aada45938c99895ca0f12726 is 50, key is test_row_0/C:col10/1732368097820/Put/seqid=0 2024-11-23T13:21:39,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742039_1215 (size=12151) 2024-11-23T13:21:39,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 329ab862a28643091a2def94193b04dc 2024-11-23T13:21:39,952 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. as already flushing 2024-11-23T13:21:39,969 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:39,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368159966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:39,970 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:39,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368159968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:39,972 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:39,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368159970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:39,988 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 329ab862a28643091a2def94193b04dc#A#compaction#180 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:39,989 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/cd7605ca831247c39fcdcc14e3bee9d3 is 175, key is test_row_0/A:col10/1732368097795/Put/seqid=0 2024-11-23T13:21:39,990 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/97282304dd2a41f081ec545c4ee66025 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/97282304dd2a41f081ec545c4ee66025 2024-11-23T13:21:39,996 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 329ab862a28643091a2def94193b04dc/B of 329ab862a28643091a2def94193b04dc into 97282304dd2a41f081ec545c4ee66025(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:21:39,996 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 329ab862a28643091a2def94193b04dc: 2024-11-23T13:21:39,996 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc., storeName=329ab862a28643091a2def94193b04dc/B, priority=13, startTime=1732368099519; duration=0sec 2024-11-23T13:21:39,996 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:21:39,996 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 329ab862a28643091a2def94193b04dc:B 2024-11-23T13:21:39,997 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-23T13:21:39,997 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-23T13:21:39,997 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-23T13:21:39,997 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 
because compaction request was cancelled 2024-11-23T13:21:39,997 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 329ab862a28643091a2def94193b04dc:C 2024-11-23T13:21:40,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742040_1216 (size=31515) 2024-11-23T13:21:40,006 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/cd7605ca831247c39fcdcc14e3bee9d3 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/cd7605ca831247c39fcdcc14e3bee9d3 2024-11-23T13:21:40,020 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 329ab862a28643091a2def94193b04dc/A of 329ab862a28643091a2def94193b04dc into cd7605ca831247c39fcdcc14e3bee9d3(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:21:40,020 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 329ab862a28643091a2def94193b04dc: 2024-11-23T13:21:40,020 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc., storeName=329ab862a28643091a2def94193b04dc/A, priority=13, startTime=1732368099519; duration=0sec 2024-11-23T13:21:40,020 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:40,020 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 329ab862a28643091a2def94193b04dc:A 2024-11-23T13:21:40,071 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:40,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368160071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:40,072 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:40,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368160072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:40,074 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:40,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368160073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:40,081 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/531227f6aada45938c99895ca0f12726 2024-11-23T13:21:40,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/ba77b3abad3f44428c4eac5427ff0af4 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/ba77b3abad3f44428c4eac5427ff0af4 2024-11-23T13:21:40,092 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/ba77b3abad3f44428c4eac5427ff0af4, entries=150, sequenceid=199, filesize=30.4 K 2024-11-23T13:21:40,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/8da1a2c2c12545abaa1d4de3609bda3e as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/8da1a2c2c12545abaa1d4de3609bda3e 2024-11-23T13:21:40,097 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/8da1a2c2c12545abaa1d4de3609bda3e, entries=150, sequenceid=199, filesize=11.9 K 2024-11-23T13:21:40,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/531227f6aada45938c99895ca0f12726 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/531227f6aada45938c99895ca0f12726 2024-11-23T13:21:40,103 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/531227f6aada45938c99895ca0f12726, entries=150, sequenceid=199, filesize=11.9 K 2024-11-23T13:21:40,104 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 329ab862a28643091a2def94193b04dc in 541ms, sequenceid=199, compaction requested=true 2024-11-23T13:21:40,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2538): Flush status journal for 329ab862a28643091a2def94193b04dc: 2024-11-23T13:21:40,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 
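The RegionTooBusyException entries above are raised from HRegion.checkResources() once the region's memstore passes its blocking size, which HBase derives as hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. A minimal Java sketch of that arithmetic follows; the 128 KB flush size is only an assumption chosen to reproduce the "Over memstore limit=512.0 K" quoted in the log, while the property names and the create/set/get calls are standard HBase configuration API.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemstoreLimit {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Writes block once the region memstore exceeds flush.size * block.multiplier.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L); // assumption: 128 KB
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // usual default
        long blocking = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("Blocking memstore size: " + blocking + " bytes"); // 524288 = 512.0 K
    }
}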
2024-11-23T13:21:40,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=48 2024-11-23T13:21:40,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4106): Remote procedure done, pid=48 2024-11-23T13:21:40,107 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=48, resume processing ppid=47 2024-11-23T13:21:40,107 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, ppid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.8480 sec 2024-11-23T13:21:40,108 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees in 2.8540 sec 2024-11-23T13:21:40,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 329ab862a28643091a2def94193b04dc 2024-11-23T13:21:40,275 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 329ab862a28643091a2def94193b04dc 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-23T13:21:40,275 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=A 2024-11-23T13:21:40,275 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:40,275 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=B 2024-11-23T13:21:40,275 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:40,275 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=C 2024-11-23T13:21:40,276 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:40,283 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123097321f2acee4b0cac1c74df45847f4b_329ab862a28643091a2def94193b04dc is 50, key is test_row_0/A:col10/1732368099967/Put/seqid=0 2024-11-23T13:21:40,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742041_1217 (size=14794) 2024-11-23T13:21:40,290 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:40,293 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:40,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368160290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:40,294 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123097321f2acee4b0cac1c74df45847f4b_329ab862a28643091a2def94193b04dc to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123097321f2acee4b0cac1c74df45847f4b_329ab862a28643091a2def94193b04dc 2024-11-23T13:21:40,296 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/d22396ec3b2045b78d635d6066d0acd7, store: [table=TestAcidGuarantees family=A region=329ab862a28643091a2def94193b04dc] 2024-11-23T13:21:40,296 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:40,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368160290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:40,296 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:40,296 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/d22396ec3b2045b78d635d6066d0acd7 is 175, key is test_row_0/A:col10/1732368099967/Put/seqid=0 2024-11-23T13:21:40,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368160291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:40,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742042_1218 (size=39749) 2024-11-23T13:21:40,303 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=221, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/d22396ec3b2045b78d635d6066d0acd7 2024-11-23T13:21:40,316 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/0b68aa8bdad2418183ace2c31715f6da is 50, key is test_row_0/B:col10/1732368099967/Put/seqid=0 2024-11-23T13:21:40,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742043_1219 (size=12151) 2024-11-23T13:21:40,328 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=221 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/0b68aa8bdad2418183ace2c31715f6da 2024-11-23T13:21:40,339 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/fca8e3be2f3f490b8f29e8f63d5d1256 is 50, key is test_row_0/C:col10/1732368099967/Put/seqid=0 2024-11-23T13:21:40,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742044_1220 (size=12151) 2024-11-23T13:21:40,394 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:40,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368160394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:40,400 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:40,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368160397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:40,400 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:40,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368160397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:40,598 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:40,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368160597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:40,602 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:40,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368160602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:40,603 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:40,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368160603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:40,744 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=221 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/fca8e3be2f3f490b8f29e8f63d5d1256 2024-11-23T13:21:40,749 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/d22396ec3b2045b78d635d6066d0acd7 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/d22396ec3b2045b78d635d6066d0acd7 2024-11-23T13:21:40,753 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/d22396ec3b2045b78d635d6066d0acd7, entries=200, sequenceid=221, filesize=38.8 K 2024-11-23T13:21:40,754 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/0b68aa8bdad2418183ace2c31715f6da as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/0b68aa8bdad2418183ace2c31715f6da 2024-11-23T13:21:40,759 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/0b68aa8bdad2418183ace2c31715f6da, entries=150, sequenceid=221, filesize=11.9 K 2024-11-23T13:21:40,760 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/fca8e3be2f3f490b8f29e8f63d5d1256 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/fca8e3be2f3f490b8f29e8f63d5d1256 2024-11-23T13:21:40,764 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/fca8e3be2f3f490b8f29e8f63d5d1256, entries=150, sequenceid=221, filesize=11.9 K 2024-11-23T13:21:40,765 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=93.93 KB/96180 for 329ab862a28643091a2def94193b04dc in 490ms, sequenceid=221, compaction requested=true 2024-11-23T13:21:40,765 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 329ab862a28643091a2def94193b04dc: 2024-11-23T13:21:40,765 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:21:40,765 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 329ab862a28643091a2def94193b04dc:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T13:21:40,765 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:40,765 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:21:40,766 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 329ab862a28643091a2def94193b04dc:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T13:21:40,766 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:40,766 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 329ab862a28643091a2def94193b04dc:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T13:21:40,766 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:21:40,766 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102369 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:21:40,766 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): 329ab862a28643091a2def94193b04dc/A is initiating minor 
compaction (all files) 2024-11-23T13:21:40,767 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 329ab862a28643091a2def94193b04dc/A in TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:40,767 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/cd7605ca831247c39fcdcc14e3bee9d3, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/ba77b3abad3f44428c4eac5427ff0af4, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/d22396ec3b2045b78d635d6066d0acd7] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp, totalSize=100.0 K 2024-11-23T13:21:40,767 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:40,767 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. files: [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/cd7605ca831247c39fcdcc14e3bee9d3, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/ba77b3abad3f44428c4eac5427ff0af4, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/d22396ec3b2045b78d635d6066d0acd7] 2024-11-23T13:21:40,767 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:21:40,767 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 329ab862a28643091a2def94193b04dc/B is initiating minor compaction (all files) 2024-11-23T13:21:40,767 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 329ab862a28643091a2def94193b04dc/B in TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 
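The ExploringCompactionPolicy entries above report selecting "3 files of size 102369 ... with 1 in ratio". The ratio test behind that phrasing accepts a candidate set only if no file is larger than the configured ratio times the combined size of the other files in the set. A simplified, self-contained sketch of that check (not the actual HBase class; 1.2 is the usual hbase.hstore.compaction.ratio default):

import java.util.List;

public class ExploringRatioCheck {
    // A candidate set is "in ratio" only if every file is no larger than
    // ratio * (sum of the other files in the set).
    static boolean filesInRatio(List<Long> sizes, double ratio) {
        long total = sizes.stream().mapToLong(Long::longValue).sum();
        for (long size : sizes) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // The three A-family store files compacted above: 30.4 K, 30.8 K and 38.8 K.
        List<Long> sizes = List.of(31105L, 31515L, 39749L); // total = 102369 bytes
        System.out.println(filesInRatio(sizes, 1.2)); // prints true
    }
}

Run against the three A-family file sizes reported in the block IDs above (31105, 31515 and 39749 bytes, totaling 102369), the check passes, matching the "1 in ratio" selection in the log.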
2024-11-23T13:21:40,767 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/97282304dd2a41f081ec545c4ee66025, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/8da1a2c2c12545abaa1d4de3609bda3e, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/0b68aa8bdad2418183ace2c31715f6da] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp, totalSize=36.0 K 2024-11-23T13:21:40,767 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting cd7605ca831247c39fcdcc14e3bee9d3, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=181, earliestPutTs=1732368097184 2024-11-23T13:21:40,768 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 97282304dd2a41f081ec545c4ee66025, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=181, earliestPutTs=1732368097184 2024-11-23T13:21:40,768 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting ba77b3abad3f44428c4eac5427ff0af4, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1732368097814 2024-11-23T13:21:40,768 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 8da1a2c2c12545abaa1d4de3609bda3e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1732368097814 2024-11-23T13:21:40,768 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting d22396ec3b2045b78d635d6066d0acd7, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1732368099963 2024-11-23T13:21:40,769 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 0b68aa8bdad2418183ace2c31715f6da, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1732368099967 2024-11-23T13:21:40,775 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=329ab862a28643091a2def94193b04dc] 2024-11-23T13:21:40,777 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 329ab862a28643091a2def94193b04dc#B#compaction#189 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:40,777 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/4d409054705e45af9bb31929ed38aa4c is 50, key is test_row_0/B:col10/1732368099967/Put/seqid=0 2024-11-23T13:21:40,779 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411232d8c8b06f10d4196879b42e713380537_329ab862a28643091a2def94193b04dc store=[table=TestAcidGuarantees family=A region=329ab862a28643091a2def94193b04dc] 2024-11-23T13:21:40,782 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411232d8c8b06f10d4196879b42e713380537_329ab862a28643091a2def94193b04dc, store=[table=TestAcidGuarantees family=A region=329ab862a28643091a2def94193b04dc] 2024-11-23T13:21:40,782 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411232d8c8b06f10d4196879b42e713380537_329ab862a28643091a2def94193b04dc because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=329ab862a28643091a2def94193b04dc] 2024-11-23T13:21:40,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742045_1221 (size=12663) 2024-11-23T13:21:40,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742046_1222 (size=4469) 2024-11-23T13:21:40,812 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/4d409054705e45af9bb31929ed38aa4c as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/4d409054705e45af9bb31929ed38aa4c 2024-11-23T13:21:40,818 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 329ab862a28643091a2def94193b04dc/B of 329ab862a28643091a2def94193b04dc into 4d409054705e45af9bb31929ed38aa4c(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
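The PressureAwareThroughputController lines above quote "total limit is 50.00 MB/second". That limit is interpolated between the controller's lower and higher throughput bounds according to store-file pressure; with the usual defaults (50 MB/s lower, 100 MB/s higher) and no pressure, the limit sits at the lower bound, which matches the log. A hedged sketch of that interpolation, a simplification of the real tuning logic that uses only standard property names:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThroughputLimit {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        long lower = conf.getLong("hbase.hstore.compaction.throughput.lower.bound",
                50L * 1024 * 1024);   // 50 MB/s default
        long higher = conf.getLong("hbase.hstore.compaction.throughput.higher.bound",
                100L * 1024 * 1024);  // 100 MB/s default
        double pressure = 0.0;        // assumption: no store-file pressure in this run
        double limitBytes = lower + (higher - lower) * Math.min(pressure, 1.0);
        System.out.printf("Compaction throughput limit: %.2f MB/second%n",
                limitBytes / (1024.0 * 1024.0)); // prints 50.00
    }
}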
2024-11-23T13:21:40,818 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 329ab862a28643091a2def94193b04dc: 2024-11-23T13:21:40,818 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc., storeName=329ab862a28643091a2def94193b04dc/B, priority=13, startTime=1732368100765; duration=0sec 2024-11-23T13:21:40,818 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:21:40,818 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 329ab862a28643091a2def94193b04dc:B 2024-11-23T13:21:40,818 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T13:21:40,819 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48946 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T13:21:40,819 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 329ab862a28643091a2def94193b04dc/C is initiating minor compaction (all files) 2024-11-23T13:21:40,819 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 329ab862a28643091a2def94193b04dc/C in TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:40,820 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/849fd21c65aa4578a9d2544c6f9effb5, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/77c5427e1c7c480bb53b849a014be1b2, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/531227f6aada45938c99895ca0f12726, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/fca8e3be2f3f490b8f29e8f63d5d1256] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp, totalSize=47.8 K 2024-11-23T13:21:40,820 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 849fd21c65aa4578a9d2544c6f9effb5, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=162, earliestPutTs=1732368096547 2024-11-23T13:21:40,820 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 77c5427e1c7c480bb53b849a014be1b2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=181, earliestPutTs=1732368097184 2024-11-23T13:21:40,821 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 531227f6aada45938c99895ca0f12726, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=199, earliestPutTs=1732368097814 2024-11-23T13:21:40,821 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting fca8e3be2f3f490b8f29e8f63d5d1256, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1732368099967 2024-11-23T13:21:40,831 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 329ab862a28643091a2def94193b04dc#C#compaction#190 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:40,832 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/ca91ec899d96490e9042e01c6da51815 is 50, key is test_row_0/C:col10/1732368099967/Put/seqid=0 2024-11-23T13:21:40,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742047_1223 (size=12629) 2024-11-23T13:21:40,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 329ab862a28643091a2def94193b04dc 2024-11-23T13:21:40,905 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 329ab862a28643091a2def94193b04dc 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-23T13:21:40,905 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=A 2024-11-23T13:21:40,905 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:40,905 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=B 2024-11-23T13:21:40,905 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:40,905 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=C 2024-11-23T13:21:40,905 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:40,913 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112341680f5446ca4e158afb7cafe7482aa9_329ab862a28643091a2def94193b04dc is 50, key is test_row_0/A:col10/1732368100903/Put/seqid=0 2024-11-23T13:21:40,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742048_1224 (size=12304) 2024-11-23T13:21:40,918 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:40,922 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112341680f5446ca4e158afb7cafe7482aa9_329ab862a28643091a2def94193b04dc to 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112341680f5446ca4e158afb7cafe7482aa9_329ab862a28643091a2def94193b04dc 2024-11-23T13:21:40,923 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/dfaa0c177787464f9ebffa861cba3a1c, store: [table=TestAcidGuarantees family=A region=329ab862a28643091a2def94193b04dc] 2024-11-23T13:21:40,924 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/dfaa0c177787464f9ebffa861cba3a1c is 175, key is test_row_0/A:col10/1732368100903/Put/seqid=0 2024-11-23T13:21:40,926 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:40,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368160923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:40,926 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:40,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368160924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:40,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742049_1225 (size=31105) 2024-11-23T13:21:40,930 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:40,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368160926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:41,028 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:41,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368161027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:41,029 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:41,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368161028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:41,033 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:41,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368161031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:41,075 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-23T13:21:41,075 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-11-23T13:21:41,207 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 329ab862a28643091a2def94193b04dc#A#compaction#188 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:41,207 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/ba3c825e25204d9ca62952e4ee3ac299 is 175, key is test_row_0/A:col10/1732368099967/Put/seqid=0 2024-11-23T13:21:41,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742050_1226 (size=31617) 2024-11-23T13:21:41,230 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:41,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368161230, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:41,231 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:41,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368161231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:41,237 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:41,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368161234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:41,247 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/ca91ec899d96490e9042e01c6da51815 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/ca91ec899d96490e9042e01c6da51815 2024-11-23T13:21:41,253 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 329ab862a28643091a2def94193b04dc/C of 329ab862a28643091a2def94193b04dc into ca91ec899d96490e9042e01c6da51815(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T13:21:41,253 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 329ab862a28643091a2def94193b04dc: 2024-11-23T13:21:41,253 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc., storeName=329ab862a28643091a2def94193b04dc/C, priority=12, startTime=1732368100766; duration=0sec 2024-11-23T13:21:41,253 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:41,253 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 329ab862a28643091a2def94193b04dc:C 2024-11-23T13:21:41,329 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=240, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/dfaa0c177787464f9ebffa861cba3a1c 2024-11-23T13:21:41,337 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/9f53c6fb1fb9483296879db636920535 is 50, key is test_row_0/B:col10/1732368100903/Put/seqid=0 2024-11-23T13:21:41,340 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:41,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37494 deadline: 1732368161338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:41,341 DEBUG [Thread-767 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4155 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc., hostname=ba2e440802a7,33173,1732368061317, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T13:21:41,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742051_1227 (size=12151) 2024-11-23T13:21:41,350 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:41,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37498 deadline: 1732368161350, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:41,350 DEBUG [Thread-775 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4164 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc., hostname=ba2e440802a7,33173,1732368061317, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at 
org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T13:21:41,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 
2024-11-23T13:21:41,363 INFO [Thread-777 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 47 completed 2024-11-23T13:21:41,365 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T13:21:41,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees 2024-11-23T13:21:41,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-23T13:21:41,366 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T13:21:41,367 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T13:21:41,367 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=49, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T13:21:41,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-23T13:21:41,518 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:41,519 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-23T13:21:41,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:41,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. as already flushing 2024-11-23T13:21:41,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:41,519 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:41,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:41,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:41,533 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:41,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368161532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:41,534 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:41,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368161533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:41,541 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:41,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368161540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:41,618 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/ba3c825e25204d9ca62952e4ee3ac299 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/ba3c825e25204d9ca62952e4ee3ac299 2024-11-23T13:21:41,623 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 329ab862a28643091a2def94193b04dc/A of 329ab862a28643091a2def94193b04dc into ba3c825e25204d9ca62952e4ee3ac299(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T13:21:41,623 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 329ab862a28643091a2def94193b04dc: 2024-11-23T13:21:41,623 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc., storeName=329ab862a28643091a2def94193b04dc/A, priority=13, startTime=1732368100765; duration=0sec 2024-11-23T13:21:41,623 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:41,623 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 329ab862a28643091a2def94193b04dc:A 2024-11-23T13:21:41,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-23T13:21:41,672 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:41,672 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-23T13:21:41,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:41,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. as already flushing 2024-11-23T13:21:41,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:41,673 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:21:41,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:41,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:41,745 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=240 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/9f53c6fb1fb9483296879db636920535 2024-11-23T13:21:41,755 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/e2b3de0a38794a94b6d618d5c5f30234 is 50, key is test_row_0/C:col10/1732368100903/Put/seqid=0 2024-11-23T13:21:41,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742052_1228 (size=12151) 2024-11-23T13:21:41,825 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:41,825 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-23T13:21:41,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:41,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. as already flushing 2024-11-23T13:21:41,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:41,826 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:21:41,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:41,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:41,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-23T13:21:41,978 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:41,978 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-23T13:21:41,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:41,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. as already flushing 2024-11-23T13:21:41,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:41,979 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:41,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:41,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:42,038 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:42,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368162036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:42,038 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:42,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368162037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:42,044 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:42,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368162043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:42,131 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:42,131 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-23T13:21:42,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 
2024-11-23T13:21:42,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. as already flushing 2024-11-23T13:21:42,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:42,132 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:42,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:21:42,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:21:42,162 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=240 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/e2b3de0a38794a94b6d618d5c5f30234 2024-11-23T13:21:42,167 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/dfaa0c177787464f9ebffa861cba3a1c as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/dfaa0c177787464f9ebffa861cba3a1c 2024-11-23T13:21:42,171 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/dfaa0c177787464f9ebffa861cba3a1c, entries=150, sequenceid=240, filesize=30.4 K 2024-11-23T13:21:42,172 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/9f53c6fb1fb9483296879db636920535 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/9f53c6fb1fb9483296879db636920535 2024-11-23T13:21:42,177 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/9f53c6fb1fb9483296879db636920535, entries=150, sequenceid=240, filesize=11.9 K 2024-11-23T13:21:42,178 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/e2b3de0a38794a94b6d618d5c5f30234 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/e2b3de0a38794a94b6d618d5c5f30234 2024-11-23T13:21:42,182 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/e2b3de0a38794a94b6d618d5c5f30234, entries=150, sequenceid=240, filesize=11.9 K 2024-11-23T13:21:42,183 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 329ab862a28643091a2def94193b04dc in 1279ms, sequenceid=240, compaction requested=false 2024-11-23T13:21:42,183 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 329ab862a28643091a2def94193b04dc: 2024-11-23T13:21:42,284 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:42,285 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-23T13:21:42,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:42,285 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2837): Flushing 329ab862a28643091a2def94193b04dc 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-23T13:21:42,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=A 2024-11-23T13:21:42,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:42,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=B 2024-11-23T13:21:42,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:42,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=C 2024-11-23T13:21:42,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:42,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411232d8477f4fbd64f0ea7fe3a7a9d9007d3_329ab862a28643091a2def94193b04dc is 50, key is test_row_0/A:col10/1732368100922/Put/seqid=0 2024-11-23T13:21:42,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742053_1229 (size=12354) 2024-11-23T13:21:42,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-23T13:21:42,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:42,707 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411232d8477f4fbd64f0ea7fe3a7a9d9007d3_329ab862a28643091a2def94193b04dc to 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411232d8477f4fbd64f0ea7fe3a7a9d9007d3_329ab862a28643091a2def94193b04dc 2024-11-23T13:21:42,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/d9195b128b0b4e2d8ab6f3efd80a649d, store: [table=TestAcidGuarantees family=A region=329ab862a28643091a2def94193b04dc] 2024-11-23T13:21:42,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/d9195b128b0b4e2d8ab6f3efd80a649d is 175, key is test_row_0/A:col10/1732368100922/Put/seqid=0 2024-11-23T13:21:42,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742054_1230 (size=31155) 2024-11-23T13:21:43,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 329ab862a28643091a2def94193b04dc 2024-11-23T13:21:43,045 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. as already flushing 2024-11-23T13:21:43,061 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:43,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368163059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:43,063 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:43,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368163061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:43,064 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:43,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368163062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:43,113 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=260, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/d9195b128b0b4e2d8ab6f3efd80a649d 2024-11-23T13:21:43,121 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/bb6eb8aa8fa84a13817fbf20bb1a50eb is 50, key is test_row_0/B:col10/1732368100922/Put/seqid=0 2024-11-23T13:21:43,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742055_1231 (size=12201) 2024-11-23T13:21:43,164 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:43,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368163163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:43,167 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:43,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368163164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:43,168 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:43,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368163165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:43,368 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:43,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368163366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:43,371 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:43,371 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-23T13:21:43,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368163370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317
2024-11-23T13:21:43,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368163369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317
2024-11-23T13:21:43,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49
2024-11-23T13:21:43,533 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=260 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/bb6eb8aa8fa84a13817fbf20bb1a50eb
2024-11-23T13:21:43,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/17ae445ffaee46b8bab78ed468539855 is 50, key is test_row_0/C:col10/1732368100922/Put/seqid=0
2024-11-23T13:21:43,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742056_1232 (size=12201)
2024-11-23T13:21:43,670 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:43,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368163669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:43,675 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:43,675 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:43,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368163673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:43,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368163673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:43,950 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=260 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/17ae445ffaee46b8bab78ed468539855 2024-11-23T13:21:43,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/d9195b128b0b4e2d8ab6f3efd80a649d as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/d9195b128b0b4e2d8ab6f3efd80a649d 2024-11-23T13:21:43,961 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/d9195b128b0b4e2d8ab6f3efd80a649d, entries=150, sequenceid=260, filesize=30.4 K 2024-11-23T13:21:43,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/bb6eb8aa8fa84a13817fbf20bb1a50eb as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/bb6eb8aa8fa84a13817fbf20bb1a50eb 2024-11-23T13:21:43,966 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/bb6eb8aa8fa84a13817fbf20bb1a50eb, entries=150, sequenceid=260, filesize=11.9 K
2024-11-23T13:21:43,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/17ae445ffaee46b8bab78ed468539855 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/17ae445ffaee46b8bab78ed468539855
2024-11-23T13:21:43,970 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/17ae445ffaee46b8bab78ed468539855, entries=150, sequenceid=260, filesize=11.9 K
2024-11-23T13:21:43,971 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=107.34 KB/109920 for 329ab862a28643091a2def94193b04dc in 1686ms, sequenceid=260, compaction requested=true
2024-11-23T13:21:43,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2538): Flush status journal for 329ab862a28643091a2def94193b04dc:
2024-11-23T13:21:43,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.
2024-11-23T13:21:43,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=50
2024-11-23T13:21:43,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4106): Remote procedure done, pid=50
2024-11-23T13:21:43,974 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=49
2024-11-23T13:21:43,974 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, ppid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6060 sec
2024-11-23T13:21:43,976 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees in 2.6100 sec
2024-11-23T13:21:44,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 329ab862a28643091a2def94193b04dc
2024-11-23T13:21:44,176 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 329ab862a28643091a2def94193b04dc 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB
2024-11-23T13:21:44,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=A
2024-11-23T13:21:44,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-23T13:21:44,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=B
2024-11-23T13:21:44,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-23T13:21:44,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=C
2024-11-23T13:21:44,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-23T13:21:44,184 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123310aa13aad46409a82876a9a9694a537_329ab862a28643091a2def94193b04dc is 50, key is test_row_0/A:col10/1732368104174/Put/seqid=0
2024-11-23T13:21:44,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742057_1233 (size=14994)
2024-11-23T13:21:44,227 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:44,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368164226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:44,228 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:44,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368164226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:44,228 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:44,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368164228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:44,330 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:44,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368164329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:44,331 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:44,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368164329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:44,331 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:44,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368164329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:44,533 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:44,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368164532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:44,533 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:44,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368164532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:44,534 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:44,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368164532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:44,592 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:44,596 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123310aa13aad46409a82876a9a9694a537_329ab862a28643091a2def94193b04dc to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123310aa13aad46409a82876a9a9694a537_329ab862a28643091a2def94193b04dc 2024-11-23T13:21:44,597 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/10c84d14f46141b4b4cb2918362ccdba, store: [table=TestAcidGuarantees family=A region=329ab862a28643091a2def94193b04dc] 2024-11-23T13:21:44,597 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/10c84d14f46141b4b4cb2918362ccdba is 175, key is test_row_0/A:col10/1732368104174/Put/seqid=0 2024-11-23T13:21:44,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742058_1234 (size=39949) 2024-11-23T13:21:44,837 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:44,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368164834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:44,838 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:44,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368164837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:44,838 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:44,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368164837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:45,004 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=280, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/10c84d14f46141b4b4cb2918362ccdba 2024-11-23T13:21:45,012 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/51d598bac790487dacb9a028007ff443 is 50, key is test_row_0/B:col10/1732368104174/Put/seqid=0 2024-11-23T13:21:45,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742059_1235 (size=12301) 2024-11-23T13:21:45,017 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=280 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/51d598bac790487dacb9a028007ff443 2024-11-23T13:21:45,026 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/fbdc0953f7854ceda2a489de0e8143f9 is 50, key is test_row_0/C:col10/1732368104174/Put/seqid=0 2024-11-23T13:21:45,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742060_1236 (size=12301) 2024-11-23T13:21:45,340 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:45,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368165340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:45,341 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:45,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368165341, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:45,343 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:45,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368165341, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:45,349 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:45,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37494 deadline: 1732368165347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:45,349 DEBUG [Thread-767 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8163 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc., hostname=ba2e440802a7,33173,1732368061317, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T13:21:45,379 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:45,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37498 deadline: 1732368165377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:45,380 DEBUG [Thread-775 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8194 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc., hostname=ba2e440802a7,33173,1732368061317, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T13:21:45,431 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=280 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/fbdc0953f7854ceda2a489de0e8143f9 2024-11-23T13:21:45,437 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/10c84d14f46141b4b4cb2918362ccdba as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/10c84d14f46141b4b4cb2918362ccdba 2024-11-23T13:21:45,441 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/10c84d14f46141b4b4cb2918362ccdba, entries=200, sequenceid=280, filesize=39.0 K 2024-11-23T13:21:45,442 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/51d598bac790487dacb9a028007ff443 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/51d598bac790487dacb9a028007ff443 2024-11-23T13:21:45,446 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/51d598bac790487dacb9a028007ff443, entries=150, sequenceid=280, filesize=12.0 K 2024-11-23T13:21:45,447 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/fbdc0953f7854ceda2a489de0e8143f9 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/fbdc0953f7854ceda2a489de0e8143f9 2024-11-23T13:21:45,452 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/fbdc0953f7854ceda2a489de0e8143f9, entries=150, sequenceid=280, filesize=12.0 K 2024-11-23T13:21:45,453 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 329ab862a28643091a2def94193b04dc in 1277ms, sequenceid=280, compaction requested=true 2024-11-23T13:21:45,454 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 329ab862a28643091a2def94193b04dc: 2024-11-23T13:21:45,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 329ab862a28643091a2def94193b04dc:A, priority=-2147483648, current under compaction store size 
is 1 2024-11-23T13:21:45,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:45,454 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T13:21:45,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 329ab862a28643091a2def94193b04dc:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T13:21:45,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:45,454 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T13:21:45,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 329ab862a28643091a2def94193b04dc:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T13:21:45,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:21:45,456 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 133826 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T13:21:45,456 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49316 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T13:21:45,456 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): 329ab862a28643091a2def94193b04dc/A is initiating minor compaction (all files) 2024-11-23T13:21:45,456 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 329ab862a28643091a2def94193b04dc/B is initiating minor compaction (all files) 2024-11-23T13:21:45,456 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 329ab862a28643091a2def94193b04dc/A in TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:45,456 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 329ab862a28643091a2def94193b04dc/B in TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 
2024-11-23T13:21:45,456 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/4d409054705e45af9bb31929ed38aa4c, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/9f53c6fb1fb9483296879db636920535, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/bb6eb8aa8fa84a13817fbf20bb1a50eb, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/51d598bac790487dacb9a028007ff443] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp, totalSize=48.2 K 2024-11-23T13:21:45,456 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/ba3c825e25204d9ca62952e4ee3ac299, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/dfaa0c177787464f9ebffa861cba3a1c, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/d9195b128b0b4e2d8ab6f3efd80a649d, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/10c84d14f46141b4b4cb2918362ccdba] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp, totalSize=130.7 K 2024-11-23T13:21:45,456 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:45,456 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 
files: [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/ba3c825e25204d9ca62952e4ee3ac299, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/dfaa0c177787464f9ebffa861cba3a1c, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/d9195b128b0b4e2d8ab6f3efd80a649d, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/10c84d14f46141b4b4cb2918362ccdba] 2024-11-23T13:21:45,456 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 4d409054705e45af9bb31929ed38aa4c, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1732368099967 2024-11-23T13:21:45,457 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 9f53c6fb1fb9483296879db636920535, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1732368100288 2024-11-23T13:21:45,457 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting ba3c825e25204d9ca62952e4ee3ac299, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1732368099967 2024-11-23T13:21:45,457 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting bb6eb8aa8fa84a13817fbf20bb1a50eb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=260, earliestPutTs=1732368100921 2024-11-23T13:21:45,457 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting dfaa0c177787464f9ebffa861cba3a1c, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1732368100288 2024-11-23T13:21:45,458 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 51d598bac790487dacb9a028007ff443, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1732368103060 2024-11-23T13:21:45,458 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting d9195b128b0b4e2d8ab6f3efd80a649d, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=260, earliestPutTs=1732368100921 2024-11-23T13:21:45,458 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 10c84d14f46141b4b4cb2918362ccdba, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1732368103058 2024-11-23T13:21:45,468 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 329ab862a28643091a2def94193b04dc#B#compaction#200 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:45,468 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/c5eaa23698f64747aa891011be849c92 is 50, key is test_row_0/B:col10/1732368104174/Put/seqid=0 2024-11-23T13:21:45,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-23T13:21:45,471 INFO [Thread-777 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 49 completed 2024-11-23T13:21:45,472 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T13:21:45,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees 2024-11-23T13:21:45,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-23T13:21:45,473 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T13:21:45,474 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T13:21:45,474 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=52, ppid=51, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T13:21:45,480 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=329ab862a28643091a2def94193b04dc] 2024-11-23T13:21:45,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742061_1237 (size=12949) 2024-11-23T13:21:45,501 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112390b45a9510ab4e41894f9c04d99c4d27_329ab862a28643091a2def94193b04dc store=[table=TestAcidGuarantees family=A region=329ab862a28643091a2def94193b04dc] 2024-11-23T13:21:45,503 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112390b45a9510ab4e41894f9c04d99c4d27_329ab862a28643091a2def94193b04dc, store=[table=TestAcidGuarantees family=A region=329ab862a28643091a2def94193b04dc] 2024-11-23T13:21:45,503 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112390b45a9510ab4e41894f9c04d99c4d27_329ab862a28643091a2def94193b04dc because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=329ab862a28643091a2def94193b04dc] 2024-11-23T13:21:45,506 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/c5eaa23698f64747aa891011be849c92 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/c5eaa23698f64747aa891011be849c92 2024-11-23T13:21:45,513 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 329ab862a28643091a2def94193b04dc/B of 329ab862a28643091a2def94193b04dc into c5eaa23698f64747aa891011be849c92(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:21:45,513 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 329ab862a28643091a2def94193b04dc: 2024-11-23T13:21:45,513 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc., storeName=329ab862a28643091a2def94193b04dc/B, priority=12, startTime=1732368105454; duration=0sec 2024-11-23T13:21:45,513 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:21:45,513 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 329ab862a28643091a2def94193b04dc:B 2024-11-23T13:21:45,513 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T13:21:45,514 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49282 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T13:21:45,514 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 329ab862a28643091a2def94193b04dc/C is initiating minor compaction (all files) 2024-11-23T13:21:45,514 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 329ab862a28643091a2def94193b04dc/C in TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 
2024-11-23T13:21:45,515 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/ca91ec899d96490e9042e01c6da51815, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/e2b3de0a38794a94b6d618d5c5f30234, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/17ae445ffaee46b8bab78ed468539855, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/fbdc0953f7854ceda2a489de0e8143f9] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp, totalSize=48.1 K 2024-11-23T13:21:45,515 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting ca91ec899d96490e9042e01c6da51815, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1732368099967 2024-11-23T13:21:45,516 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting e2b3de0a38794a94b6d618d5c5f30234, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1732368100288 2024-11-23T13:21:45,516 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 17ae445ffaee46b8bab78ed468539855, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=260, earliestPutTs=1732368100921 2024-11-23T13:21:45,517 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting fbdc0953f7854ceda2a489de0e8143f9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1732368103060 2024-11-23T13:21:45,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742062_1238 (size=4469) 2024-11-23T13:21:45,522 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 329ab862a28643091a2def94193b04dc#A#compaction#201 average throughput is 0.58 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:45,522 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/62ab31ac3f654d2cafb2858b81117f93 is 175, key is test_row_0/A:col10/1732368104174/Put/seqid=0 2024-11-23T13:21:45,534 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 329ab862a28643091a2def94193b04dc#C#compaction#202 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:45,534 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/8586e68680504de9bc272ea9bdf81d88 is 50, key is test_row_0/C:col10/1732368104174/Put/seqid=0 2024-11-23T13:21:45,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742063_1239 (size=31903) 2024-11-23T13:21:45,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742064_1240 (size=12915) 2024-11-23T13:21:45,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-23T13:21:45,626 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:45,626 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-23T13:21:45,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:45,627 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2837): Flushing 329ab862a28643091a2def94193b04dc 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-23T13:21:45,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=A 2024-11-23T13:21:45,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:45,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=B 2024-11-23T13:21:45,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:45,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=C 2024-11-23T13:21:45,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:45,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123c0dddec17a644617a931084e9c56534f_329ab862a28643091a2def94193b04dc is 50, key is 
test_row_0/A:col10/1732368104178/Put/seqid=0 2024-11-23T13:21:45,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742065_1241 (size=12454) 2024-11-23T13:21:45,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-23T13:21:45,888 INFO [master/ba2e440802a7:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-23T13:21:45,888 INFO [master/ba2e440802a7:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-23T13:21:45,944 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/62ab31ac3f654d2cafb2858b81117f93 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/62ab31ac3f654d2cafb2858b81117f93 2024-11-23T13:21:45,950 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 329ab862a28643091a2def94193b04dc/A of 329ab862a28643091a2def94193b04dc into 62ab31ac3f654d2cafb2858b81117f93(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:21:45,950 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 329ab862a28643091a2def94193b04dc: 2024-11-23T13:21:45,950 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc., storeName=329ab862a28643091a2def94193b04dc/A, priority=12, startTime=1732368105454; duration=0sec 2024-11-23T13:21:45,950 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:45,950 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 329ab862a28643091a2def94193b04dc:A 2024-11-23T13:21:45,953 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/8586e68680504de9bc272ea9bdf81d88 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/8586e68680504de9bc272ea9bdf81d88 2024-11-23T13:21:45,958 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 329ab862a28643091a2def94193b04dc/C of 329ab862a28643091a2def94193b04dc into 8586e68680504de9bc272ea9bdf81d88(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T13:21:45,958 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 329ab862a28643091a2def94193b04dc: 2024-11-23T13:21:45,958 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc., storeName=329ab862a28643091a2def94193b04dc/C, priority=12, startTime=1732368105454; duration=0sec 2024-11-23T13:21:45,958 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:45,958 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 329ab862a28643091a2def94193b04dc:C 2024-11-23T13:21:46,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:46,047 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123c0dddec17a644617a931084e9c56534f_329ab862a28643091a2def94193b04dc to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123c0dddec17a644617a931084e9c56534f_329ab862a28643091a2def94193b04dc 2024-11-23T13:21:46,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/a227bb05cc514f31bb501ab58af951f2, store: [table=TestAcidGuarantees family=A region=329ab862a28643091a2def94193b04dc] 2024-11-23T13:21:46,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/a227bb05cc514f31bb501ab58af951f2 is 175, key is test_row_0/A:col10/1732368104178/Put/seqid=0 2024-11-23T13:21:46,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742066_1242 (size=31255) 2024-11-23T13:21:46,065 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=297, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/a227bb05cc514f31bb501ab58af951f2 2024-11-23T13:21:46,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/dcc91cb677a540d187e89a0aae91eeff is 50, key is test_row_0/B:col10/1732368104178/Put/seqid=0 2024-11-23T13:21:46,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-23T13:21:46,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742067_1243 (size=12301) 2024-11-23T13:21:46,080 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=297 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/dcc91cb677a540d187e89a0aae91eeff 2024-11-23T13:21:46,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/8687fbf5765f4eed96ac74dc7f00a8a8 is 50, key is test_row_0/C:col10/1732368104178/Put/seqid=0 2024-11-23T13:21:46,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742068_1244 (size=12301) 2024-11-23T13:21:46,348 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. as already flushing 2024-11-23T13:21:46,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 329ab862a28643091a2def94193b04dc 2024-11-23T13:21:46,367 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:46,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368166366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:46,368 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:46,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368166367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:46,370 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:46,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368166367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:46,469 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:46,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368166468, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:46,470 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:46,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368166469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:46,471 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:46,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368166471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:46,497 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=297 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/8687fbf5765f4eed96ac74dc7f00a8a8 2024-11-23T13:21:46,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/a227bb05cc514f31bb501ab58af951f2 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/a227bb05cc514f31bb501ab58af951f2 2024-11-23T13:21:46,508 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/a227bb05cc514f31bb501ab58af951f2, entries=150, sequenceid=297, filesize=30.5 K 2024-11-23T13:21:46,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/dcc91cb677a540d187e89a0aae91eeff as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/dcc91cb677a540d187e89a0aae91eeff 2024-11-23T13:21:46,514 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/dcc91cb677a540d187e89a0aae91eeff, entries=150, sequenceid=297, filesize=12.0 K 2024-11-23T13:21:46,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/8687fbf5765f4eed96ac74dc7f00a8a8 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/8687fbf5765f4eed96ac74dc7f00a8a8 2024-11-23T13:21:46,520 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/8687fbf5765f4eed96ac74dc7f00a8a8, entries=150, sequenceid=297, filesize=12.0 K 2024-11-23T13:21:46,521 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for 329ab862a28643091a2def94193b04dc in 894ms, sequenceid=297, compaction requested=false 2024-11-23T13:21:46,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2538): Flush status journal for 329ab862a28643091a2def94193b04dc: 2024-11-23T13:21:46,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:46,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-11-23T13:21:46,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4106): Remote procedure done, pid=52 2024-11-23T13:21:46,525 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=52, resume processing ppid=51 2024-11-23T13:21:46,525 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, ppid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0490 sec 2024-11-23T13:21:46,527 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees in 1.0540 sec 2024-11-23T13:21:46,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-23T13:21:46,577 INFO [Thread-777 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 51 completed 2024-11-23T13:21:46,578 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T13:21:46,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees 2024-11-23T13:21:46,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-23T13:21:46,580 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE, 
locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T13:21:46,581 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T13:21:46,581 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T13:21:46,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 329ab862a28643091a2def94193b04dc 2024-11-23T13:21:46,672 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 329ab862a28643091a2def94193b04dc 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-23T13:21:46,673 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=A 2024-11-23T13:21:46,673 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:46,673 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=B 2024-11-23T13:21:46,673 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:46,673 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=C 2024-11-23T13:21:46,673 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:46,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-23T13:21:46,681 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112335c7114a17fb457bb491cf8e8864de05_329ab862a28643091a2def94193b04dc is 50, key is test_row_0/A:col10/1732368106670/Put/seqid=0 2024-11-23T13:21:46,684 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:46,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368166681, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:46,684 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:46,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368166682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:46,686 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:46,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368166683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:46,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742069_1245 (size=17534) 2024-11-23T13:21:46,732 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:46,733 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-23T13:21:46,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:46,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. as already flushing 2024-11-23T13:21:46,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:46,733 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:46,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:46,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:46,787 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:46,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368166785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:46,787 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:46,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368166786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:46,788 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:46,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368166787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:46,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-23T13:21:46,885 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:46,886 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-23T13:21:46,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:46,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. as already flushing 2024-11-23T13:21:46,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:46,886 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:21:46,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:46,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:46,989 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:46,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368166988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:46,991 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:46,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368166989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:46,991 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:46,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368166990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:47,038 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:47,038 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-23T13:21:47,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 
2024-11-23T13:21:47,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. as already flushing 2024-11-23T13:21:47,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:47,039 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:47,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:21:47,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:21:47,087 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:47,091 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112335c7114a17fb457bb491cf8e8864de05_329ab862a28643091a2def94193b04dc to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112335c7114a17fb457bb491cf8e8864de05_329ab862a28643091a2def94193b04dc 2024-11-23T13:21:47,092 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/fa5df624cb3d43b9b541a977c8ca1f27, store: [table=TestAcidGuarantees family=A region=329ab862a28643091a2def94193b04dc] 2024-11-23T13:21:47,093 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/fa5df624cb3d43b9b541a977c8ca1f27 is 175, key is test_row_0/A:col10/1732368106670/Put/seqid=0 2024-11-23T13:21:47,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742070_1246 (size=48639) 2024-11-23T13:21:47,098 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=322, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/fa5df624cb3d43b9b541a977c8ca1f27 2024-11-23T13:21:47,106 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/dc6c3c84db814b8e9bdfae19c3ef8755 is 50, key is test_row_0/B:col10/1732368106670/Put/seqid=0 2024-11-23T13:21:47,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742071_1247 (size=12301) 2024-11-23T13:21:47,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-23T13:21:47,191 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:47,191 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-23T13:21:47,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 
2024-11-23T13:21:47,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. as already flushing 2024-11-23T13:21:47,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:47,192 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:47,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:21:47,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:47,292 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:47,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368167291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:47,294 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:47,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368167293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:47,294 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:47,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368167294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:47,344 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:47,344 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-23T13:21:47,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:47,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. as already flushing 2024-11-23T13:21:47,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:47,344 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:47,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:47,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:47,497 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:47,497 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-23T13:21:47,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:47,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. as already flushing 2024-11-23T13:21:47,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:47,497 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:47,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:21:47,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:21:47,510 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=322 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/dc6c3c84db814b8e9bdfae19c3ef8755 2024-11-23T13:21:47,519 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/94e4fce5409b4105a33b7d0b74ac2846 is 50, key is test_row_0/C:col10/1732368106670/Put/seqid=0 2024-11-23T13:21:47,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742072_1248 (size=12301) 2024-11-23T13:21:47,524 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=322 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/94e4fce5409b4105a33b7d0b74ac2846 2024-11-23T13:21:47,528 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/fa5df624cb3d43b9b541a977c8ca1f27 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/fa5df624cb3d43b9b541a977c8ca1f27 2024-11-23T13:21:47,532 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/fa5df624cb3d43b9b541a977c8ca1f27, entries=250, sequenceid=322, filesize=47.5 K 2024-11-23T13:21:47,533 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/dc6c3c84db814b8e9bdfae19c3ef8755 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/dc6c3c84db814b8e9bdfae19c3ef8755 2024-11-23T13:21:47,537 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/dc6c3c84db814b8e9bdfae19c3ef8755, entries=150, sequenceid=322, filesize=12.0 K 2024-11-23T13:21:47,538 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/94e4fce5409b4105a33b7d0b74ac2846 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/94e4fce5409b4105a33b7d0b74ac2846 2024-11-23T13:21:47,542 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/94e4fce5409b4105a33b7d0b74ac2846, entries=150, sequenceid=322, filesize=12.0 K 2024-11-23T13:21:47,543 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 329ab862a28643091a2def94193b04dc in 872ms, sequenceid=322, compaction requested=true 2024-11-23T13:21:47,543 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 329ab862a28643091a2def94193b04dc: 2024-11-23T13:21:47,543 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 329ab862a28643091a2def94193b04dc:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T13:21:47,543 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:47,543 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 329ab862a28643091a2def94193b04dc:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T13:21:47,543 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:21:47,543 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:47,543 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 329ab862a28643091a2def94193b04dc:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T13:21:47,543 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:21:47,543 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:21:47,545 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:21:47,545 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 111797 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:21:47,545 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 329ab862a28643091a2def94193b04dc/B is initiating minor compaction (all files) 2024-11-23T13:21:47,545 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): 329ab862a28643091a2def94193b04dc/A is initiating minor compaction (all files) 2024-11-23T13:21:47,545 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 329ab862a28643091a2def94193b04dc/A in TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 
2024-11-23T13:21:47,545 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 329ab862a28643091a2def94193b04dc/B in TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:47,545 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/62ab31ac3f654d2cafb2858b81117f93, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/a227bb05cc514f31bb501ab58af951f2, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/fa5df624cb3d43b9b541a977c8ca1f27] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp, totalSize=109.2 K 2024-11-23T13:21:47,545 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/c5eaa23698f64747aa891011be849c92, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/dcc91cb677a540d187e89a0aae91eeff, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/dc6c3c84db814b8e9bdfae19c3ef8755] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp, totalSize=36.7 K 2024-11-23T13:21:47,545 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:47,545 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 
files: [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/62ab31ac3f654d2cafb2858b81117f93, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/a227bb05cc514f31bb501ab58af951f2, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/fa5df624cb3d43b9b541a977c8ca1f27] 2024-11-23T13:21:47,546 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting c5eaa23698f64747aa891011be849c92, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1732368103060 2024-11-23T13:21:47,546 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 62ab31ac3f654d2cafb2858b81117f93, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1732368103060 2024-11-23T13:21:47,546 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting dcc91cb677a540d187e89a0aae91eeff, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=297, earliestPutTs=1732368104178 2024-11-23T13:21:47,546 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting a227bb05cc514f31bb501ab58af951f2, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=297, earliestPutTs=1732368104178 2024-11-23T13:21:47,547 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting dc6c3c84db814b8e9bdfae19c3ef8755, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=322, earliestPutTs=1732368106362 2024-11-23T13:21:47,547 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting fa5df624cb3d43b9b541a977c8ca1f27, keycount=250, bloomtype=ROW, size=47.5 K, encoding=NONE, compression=NONE, seqNum=322, earliestPutTs=1732368106362 2024-11-23T13:21:47,553 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=329ab862a28643091a2def94193b04dc] 2024-11-23T13:21:47,554 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 329ab862a28643091a2def94193b04dc#B#compaction#209 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:47,555 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/a32973da69944cf5b4e6217442b34b93 is 50, key is test_row_0/B:col10/1732368106670/Put/seqid=0 2024-11-23T13:21:47,557 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241123402ff8efecbd4280b2f62ce377a3db61_329ab862a28643091a2def94193b04dc store=[table=TestAcidGuarantees family=A region=329ab862a28643091a2def94193b04dc] 2024-11-23T13:21:47,560 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241123402ff8efecbd4280b2f62ce377a3db61_329ab862a28643091a2def94193b04dc, store=[table=TestAcidGuarantees family=A region=329ab862a28643091a2def94193b04dc] 2024-11-23T13:21:47,560 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123402ff8efecbd4280b2f62ce377a3db61_329ab862a28643091a2def94193b04dc because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=329ab862a28643091a2def94193b04dc] 2024-11-23T13:21:47,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742074_1250 (size=4469) 2024-11-23T13:21:47,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742073_1249 (size=13051) 2024-11-23T13:21:47,649 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:47,650 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-23T13:21:47,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 
2024-11-23T13:21:47,650 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2837): Flushing 329ab862a28643091a2def94193b04dc 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-23T13:21:47,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=A 2024-11-23T13:21:47,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:47,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=B 2024-11-23T13:21:47,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:47,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=C 2024-11-23T13:21:47,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:47,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112384e9fab2aee24319a6a5ede647d77aa8_329ab862a28643091a2def94193b04dc is 50, key is test_row_0/A:col10/1732368106682/Put/seqid=0 2024-11-23T13:21:47,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742075_1251 (size=12454) 2024-11-23T13:21:47,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-23T13:21:47,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 329ab862a28643091a2def94193b04dc 2024-11-23T13:21:47,798 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. as already flushing 2024-11-23T13:21:47,823 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:47,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368167822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:47,827 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:47,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368167823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:47,827 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:47,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368167824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:47,925 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:47,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368167925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:47,930 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:47,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368167928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:47,930 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:47,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368167928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:47,966 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 329ab862a28643091a2def94193b04dc#A#compaction#210 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:47,966 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/e465148875794b66824178f55066ac78 is 175, key is test_row_0/A:col10/1732368106670/Put/seqid=0 2024-11-23T13:21:47,971 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/a32973da69944cf5b4e6217442b34b93 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/a32973da69944cf5b4e6217442b34b93 2024-11-23T13:21:47,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742076_1252 (size=32005) 2024-11-23T13:21:47,978 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 329ab862a28643091a2def94193b04dc/B of 329ab862a28643091a2def94193b04dc into a32973da69944cf5b4e6217442b34b93(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T13:21:47,978 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 329ab862a28643091a2def94193b04dc: 2024-11-23T13:21:47,978 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc., storeName=329ab862a28643091a2def94193b04dc/B, priority=13, startTime=1732368107543; duration=0sec 2024-11-23T13:21:47,978 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:21:47,978 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 329ab862a28643091a2def94193b04dc:B 2024-11-23T13:21:47,978 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:21:47,979 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37517 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:21:47,980 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 329ab862a28643091a2def94193b04dc/C is initiating minor compaction (all files) 2024-11-23T13:21:47,980 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 329ab862a28643091a2def94193b04dc/C in TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:47,980 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/8586e68680504de9bc272ea9bdf81d88, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/8687fbf5765f4eed96ac74dc7f00a8a8, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/94e4fce5409b4105a33b7d0b74ac2846] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp, totalSize=36.6 K 2024-11-23T13:21:47,980 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 8586e68680504de9bc272ea9bdf81d88, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1732368103060 2024-11-23T13:21:47,981 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 8687fbf5765f4eed96ac74dc7f00a8a8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=297, earliestPutTs=1732368104178 2024-11-23T13:21:47,981 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 94e4fce5409b4105a33b7d0b74ac2846, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=322, earliestPutTs=1732368106362 2024-11-23T13:21:47,989 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
329ab862a28643091a2def94193b04dc#C#compaction#212 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:47,990 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/c3cf2cf96e464fc5beac5e85c6a1739a is 50, key is test_row_0/C:col10/1732368106670/Put/seqid=0 2024-11-23T13:21:47,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742077_1253 (size=13017) 2024-11-23T13:21:48,009 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/c3cf2cf96e464fc5beac5e85c6a1739a as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/c3cf2cf96e464fc5beac5e85c6a1739a 2024-11-23T13:21:48,016 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 329ab862a28643091a2def94193b04dc/C of 329ab862a28643091a2def94193b04dc into c3cf2cf96e464fc5beac5e85c6a1739a(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:21:48,016 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 329ab862a28643091a2def94193b04dc: 2024-11-23T13:21:48,016 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc., storeName=329ab862a28643091a2def94193b04dc/C, priority=13, startTime=1732368107543; duration=0sec 2024-11-23T13:21:48,016 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:48,016 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 329ab862a28643091a2def94193b04dc:C 2024-11-23T13:21:48,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:48,076 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112384e9fab2aee24319a6a5ede647d77aa8_329ab862a28643091a2def94193b04dc to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112384e9fab2aee24319a6a5ede647d77aa8_329ab862a28643091a2def94193b04dc 2024-11-23T13:21:48,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, 
pid=54}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/5eb13291a01743cd85051ef57616ee75, store: [table=TestAcidGuarantees family=A region=329ab862a28643091a2def94193b04dc] 2024-11-23T13:21:48,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/5eb13291a01743cd85051ef57616ee75 is 175, key is test_row_0/A:col10/1732368106682/Put/seqid=0 2024-11-23T13:21:48,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742078_1254 (size=31255) 2024-11-23T13:21:48,084 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=335, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/5eb13291a01743cd85051ef57616ee75 2024-11-23T13:21:48,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/dae05d9ac7c84318ba301f72e756fc96 is 50, key is test_row_0/B:col10/1732368106682/Put/seqid=0 2024-11-23T13:21:48,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742079_1255 (size=12301) 2024-11-23T13:21:48,102 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=335 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/dae05d9ac7c84318ba301f72e756fc96 2024-11-23T13:21:48,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/cdbc57cf014143148faec6b063e4e00a is 50, key is test_row_0/C:col10/1732368106682/Put/seqid=0 2024-11-23T13:21:48,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742080_1256 (size=12301) 2024-11-23T13:21:48,127 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:48,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368168127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:48,133 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:48,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368168132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:48,133 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:48,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368168132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:48,378 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/e465148875794b66824178f55066ac78 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/e465148875794b66824178f55066ac78 2024-11-23T13:21:48,383 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 329ab862a28643091a2def94193b04dc/A of 329ab862a28643091a2def94193b04dc into e465148875794b66824178f55066ac78(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T13:21:48,383 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 329ab862a28643091a2def94193b04dc: 2024-11-23T13:21:48,383 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc., storeName=329ab862a28643091a2def94193b04dc/A, priority=13, startTime=1732368107543; duration=0sec 2024-11-23T13:21:48,383 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:48,384 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 329ab862a28643091a2def94193b04dc:A 2024-11-23T13:21:48,431 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:48,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368168430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:48,434 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:48,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368168434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:48,436 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:48,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368168434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:48,527 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=335 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/cdbc57cf014143148faec6b063e4e00a 2024-11-23T13:21:48,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/5eb13291a01743cd85051ef57616ee75 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/5eb13291a01743cd85051ef57616ee75 2024-11-23T13:21:48,535 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/5eb13291a01743cd85051ef57616ee75, entries=150, sequenceid=335, filesize=30.5 K 2024-11-23T13:21:48,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/dae05d9ac7c84318ba301f72e756fc96 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/dae05d9ac7c84318ba301f72e756fc96 2024-11-23T13:21:48,540 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/dae05d9ac7c84318ba301f72e756fc96, entries=150, sequenceid=335, filesize=12.0 K 2024-11-23T13:21:48,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/cdbc57cf014143148faec6b063e4e00a as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/cdbc57cf014143148faec6b063e4e00a 2024-11-23T13:21:48,545 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/cdbc57cf014143148faec6b063e4e00a, entries=150, sequenceid=335, filesize=12.0 K 2024-11-23T13:21:48,545 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 329ab862a28643091a2def94193b04dc in 895ms, sequenceid=335, compaction requested=false 2024-11-23T13:21:48,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2538): Flush status journal for 329ab862a28643091a2def94193b04dc: 2024-11-23T13:21:48,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:48,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-11-23T13:21:48,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4106): Remote procedure done, pid=54 2024-11-23T13:21:48,548 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=53 2024-11-23T13:21:48,548 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=54, ppid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9660 sec 2024-11-23T13:21:48,550 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees in 1.9710 sec 2024-11-23T13:21:48,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-23T13:21:48,684 INFO [Thread-777 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 53 completed 2024-11-23T13:21:48,685 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T13:21:48,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees 2024-11-23T13:21:48,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-23T13:21:48,687 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE, 
locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T13:21:48,688 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T13:21:48,688 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=56, ppid=55, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T13:21:48,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-23T13:21:48,840 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:48,840 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-23T13:21:48,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:48,841 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2837): Flushing 329ab862a28643091a2def94193b04dc 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-23T13:21:48,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=A 2024-11-23T13:21:48,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:48,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=B 2024-11-23T13:21:48,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:48,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=C 2024-11-23T13:21:48,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:48,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411231fce2353da0d4e97af34d132102d4142_329ab862a28643091a2def94193b04dc is 50, key is test_row_0/A:col10/1732368107821/Put/seqid=0 2024-11-23T13:21:48,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742081_1257 
(size=12454) 2024-11-23T13:21:48,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:48,862 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411231fce2353da0d4e97af34d132102d4142_329ab862a28643091a2def94193b04dc to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411231fce2353da0d4e97af34d132102d4142_329ab862a28643091a2def94193b04dc 2024-11-23T13:21:48,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/c51618dc31294209aaa277aac90beb8b, store: [table=TestAcidGuarantees family=A region=329ab862a28643091a2def94193b04dc] 2024-11-23T13:21:48,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/c51618dc31294209aaa277aac90beb8b is 175, key is test_row_0/A:col10/1732368107821/Put/seqid=0 2024-11-23T13:21:48,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742082_1258 (size=31255) 2024-11-23T13:21:48,871 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=362, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/c51618dc31294209aaa277aac90beb8b 2024-11-23T13:21:48,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/b24def592af344abb9684778f696f9da is 50, key is test_row_0/B:col10/1732368107821/Put/seqid=0 2024-11-23T13:21:48,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742083_1259 (size=12301) 2024-11-23T13:21:48,937 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. as already flushing 2024-11-23T13:21:48,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 329ab862a28643091a2def94193b04dc 2024-11-23T13:21:48,946 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:48,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368168945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:48,948 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:48,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368168946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:48,948 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:48,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368168947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:48,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-23T13:21:49,049 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:49,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368169047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:49,051 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:49,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368169049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:49,051 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:49,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368169049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:49,251 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:49,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368169250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:49,253 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:49,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368169252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:49,255 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:49,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368169253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:49,288 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=362 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/b24def592af344abb9684778f696f9da 2024-11-23T13:21:49,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-23T13:21:49,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/b901411096a447ac9a6d39c48a80210d is 50, key is test_row_0/C:col10/1732368107821/Put/seqid=0 2024-11-23T13:21:49,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742084_1260 (size=12301) 2024-11-23T13:21:49,553 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:49,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368169552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:49,556 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:49,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368169555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:49,558 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:49,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368169556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:49,702 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=362 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/b901411096a447ac9a6d39c48a80210d 2024-11-23T13:21:49,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/c51618dc31294209aaa277aac90beb8b as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/c51618dc31294209aaa277aac90beb8b 2024-11-23T13:21:49,728 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/c51618dc31294209aaa277aac90beb8b, entries=150, sequenceid=362, filesize=30.5 K 2024-11-23T13:21:49,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/b24def592af344abb9684778f696f9da as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/b24def592af344abb9684778f696f9da 2024-11-23T13:21:49,734 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/b24def592af344abb9684778f696f9da, entries=150, sequenceid=362, filesize=12.0 K 2024-11-23T13:21:49,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/b901411096a447ac9a6d39c48a80210d as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/b901411096a447ac9a6d39c48a80210d 2024-11-23T13:21:49,738 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/b901411096a447ac9a6d39c48a80210d, entries=150, sequenceid=362, filesize=12.0 K 2024-11-23T13:21:49,739 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 329ab862a28643091a2def94193b04dc in 899ms, sequenceid=362, compaction requested=true 2024-11-23T13:21:49,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2538): Flush status journal for 329ab862a28643091a2def94193b04dc: 2024-11-23T13:21:49,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 
2024-11-23T13:21:49,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=56 2024-11-23T13:21:49,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4106): Remote procedure done, pid=56 2024-11-23T13:21:49,742 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=56, resume processing ppid=55 2024-11-23T13:21:49,742 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, ppid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0530 sec 2024-11-23T13:21:49,744 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees in 1.0580 sec 2024-11-23T13:21:49,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-23T13:21:49,790 INFO [Thread-777 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 55 completed 2024-11-23T13:21:49,792 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T13:21:49,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees 2024-11-23T13:21:49,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-23T13:21:49,793 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T13:21:49,794 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T13:21:49,794 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T13:21:49,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-23T13:21:49,946 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:49,947 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-23T13:21:49,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 
2024-11-23T13:21:49,947 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2837): Flushing 329ab862a28643091a2def94193b04dc 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-23T13:21:49,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=A 2024-11-23T13:21:49,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:49,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=B 2024-11-23T13:21:49,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:49,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=C 2024-11-23T13:21:49,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:49,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411239c0116b09cb744d6a51075c847b1eef8_329ab862a28643091a2def94193b04dc is 50, key is test_row_0/A:col10/1732368108944/Put/seqid=0 2024-11-23T13:21:49,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742085_1261 (size=12454) 2024-11-23T13:21:50,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 329ab862a28643091a2def94193b04dc 2024-11-23T13:21:50,059 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. as already flushing 2024-11-23T13:21:50,086 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:50,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368170085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:50,089 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:50,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368170086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:50,089 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:50,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368170086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:50,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-23T13:21:50,190 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:50,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368170187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:50,190 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:50,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368170190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:50,191 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:50,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368170190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:50,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:50,364 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411239c0116b09cb744d6a51075c847b1eef8_329ab862a28643091a2def94193b04dc to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411239c0116b09cb744d6a51075c847b1eef8_329ab862a28643091a2def94193b04dc 2024-11-23T13:21:50,365 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/6d337b55ba7f461a8673efe6699c6597, store: [table=TestAcidGuarantees family=A region=329ab862a28643091a2def94193b04dc] 2024-11-23T13:21:50,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/6d337b55ba7f461a8673efe6699c6597 is 175, key is test_row_0/A:col10/1732368108944/Put/seqid=0 2024-11-23T13:21:50,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742086_1262 (size=31255) 2024-11-23T13:21:50,392 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:50,392 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:50,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368170392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:50,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368170392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:50,393 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:50,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368170392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:50,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-23T13:21:50,696 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:50,696 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:50,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368170694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:50,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368170694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:50,697 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:50,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368170695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:50,770 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=375, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/6d337b55ba7f461a8673efe6699c6597 2024-11-23T13:21:50,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/f954af48f69447eb981da5bf3483187a is 50, key is test_row_0/B:col10/1732368108944/Put/seqid=0 2024-11-23T13:21:50,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742087_1263 (size=12301) 2024-11-23T13:21:50,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-23T13:21:51,184 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=375 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/f954af48f69447eb981da5bf3483187a 2024-11-23T13:21:51,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/cec8ddfa8de548b0a966b3c5d4cde96e is 50, key is test_row_0/C:col10/1732368108944/Put/seqid=0 2024-11-23T13:21:51,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742088_1264 (size=12301) 2024-11-23T13:21:51,199 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:51,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368171198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:51,199 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:51,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368171199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:51,200 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:51,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368171199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:51,596 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=375 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/cec8ddfa8de548b0a966b3c5d4cde96e 2024-11-23T13:21:51,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/6d337b55ba7f461a8673efe6699c6597 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/6d337b55ba7f461a8673efe6699c6597 2024-11-23T13:21:51,605 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/6d337b55ba7f461a8673efe6699c6597, entries=150, sequenceid=375, filesize=30.5 K 2024-11-23T13:21:51,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/f954af48f69447eb981da5bf3483187a as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/f954af48f69447eb981da5bf3483187a 2024-11-23T13:21:51,610 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/f954af48f69447eb981da5bf3483187a, entries=150, sequenceid=375, filesize=12.0 K 2024-11-23T13:21:51,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/cec8ddfa8de548b0a966b3c5d4cde96e as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/cec8ddfa8de548b0a966b3c5d4cde96e 2024-11-23T13:21:51,614 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/cec8ddfa8de548b0a966b3c5d4cde96e, entries=150, sequenceid=375, filesize=12.0 K 2024-11-23T13:21:51,615 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 329ab862a28643091a2def94193b04dc in 1668ms, sequenceid=375, compaction requested=true 2024-11-23T13:21:51,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2538): Flush status journal for 329ab862a28643091a2def94193b04dc: 2024-11-23T13:21:51,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:51,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=58 2024-11-23T13:21:51,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4106): Remote procedure done, pid=58 2024-11-23T13:21:51,617 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=58, resume processing ppid=57 2024-11-23T13:21:51,617 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, ppid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8220 sec 2024-11-23T13:21:51,618 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees in 1.8260 sec 2024-11-23T13:21:51,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-23T13:21:51,897 INFO [Thread-777 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 57 completed 2024-11-23T13:21:51,898 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T13:21:51,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees 2024-11-23T13:21:51,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-23T13:21:51,900 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE, 
locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T13:21:51,901 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T13:21:51,901 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=60, ppid=59, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T13:21:52,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-23T13:21:52,052 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:52,053 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-23T13:21:52,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:52,053 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2837): Flushing 329ab862a28643091a2def94193b04dc 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-23T13:21:52,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=A 2024-11-23T13:21:52,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:52,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=B 2024-11-23T13:21:52,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:52,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=C 2024-11-23T13:21:52,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:52,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411237da32d3c629c4ddb9df1a12b8f4296e1_329ab862a28643091a2def94193b04dc is 50, key is test_row_0/A:col10/1732368110081/Put/seqid=0 2024-11-23T13:21:52,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742089_1265 
(size=12454) 2024-11-23T13:21:52,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-23T13:21:52,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 329ab862a28643091a2def94193b04dc 2024-11-23T13:21:52,206 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. as already flushing 2024-11-23T13:21:52,218 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:52,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368172214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:52,218 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:52,218 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:52,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368172215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:52,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368172216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:52,319 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:52,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368172319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:52,320 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:52,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368172319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:52,320 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:52,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368172319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:52,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:52,470 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411237da32d3c629c4ddb9df1a12b8f4296e1_329ab862a28643091a2def94193b04dc to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411237da32d3c629c4ddb9df1a12b8f4296e1_329ab862a28643091a2def94193b04dc 2024-11-23T13:21:52,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/c4b3f6d6837b45639b1f7a44e9867cfc, store: [table=TestAcidGuarantees family=A region=329ab862a28643091a2def94193b04dc] 2024-11-23T13:21:52,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/c4b3f6d6837b45639b1f7a44e9867cfc is 175, key is test_row_0/A:col10/1732368110081/Put/seqid=0 2024-11-23T13:21:52,479 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742090_1266 (size=31255) 2024-11-23T13:21:52,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-23T13:21:52,522 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:52,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368172520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:52,523 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:52,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368172521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:52,523 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:52,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368172522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:52,825 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:52,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37538 deadline: 1732368172824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:52,826 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:52,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37520 deadline: 1732368172825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:52,826 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:52,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37548 deadline: 1732368172826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:52,880 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=399, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/c4b3f6d6837b45639b1f7a44e9867cfc 2024-11-23T13:21:52,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/0412727594a04d7fa93267c89970e120 is 50, key is test_row_0/B:col10/1732368110081/Put/seqid=0 2024-11-23T13:21:52,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742091_1267 (size=12301) 2024-11-23T13:21:52,895 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=399 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/0412727594a04d7fa93267c89970e120 2024-11-23T13:21:52,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/024d40b0a7f74a948b9795a3b29e3f3f is 50, key is 
test_row_0/C:col10/1732368110081/Put/seqid=0 2024-11-23T13:21:52,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742092_1268 (size=12301) 2024-11-23T13:21:53,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-23T13:21:53,139 DEBUG [Thread-780 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x517ff977 to 127.0.0.1:51875 2024-11-23T13:21:53,139 DEBUG [Thread-780 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:21:53,139 DEBUG [Thread-778 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x247c0c93 to 127.0.0.1:51875 2024-11-23T13:21:53,139 DEBUG [Thread-778 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:21:53,142 DEBUG [Thread-784 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7a11164b to 127.0.0.1:51875 2024-11-23T13:21:53,142 DEBUG [Thread-784 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:21:53,146 DEBUG [Thread-782 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3448d233 to 127.0.0.1:51875 2024-11-23T13:21:53,146 DEBUG [Thread-782 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:21:53,310 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=399 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/024d40b0a7f74a948b9795a3b29e3f3f 2024-11-23T13:21:53,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/c4b3f6d6837b45639b1f7a44e9867cfc as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/c4b3f6d6837b45639b1f7a44e9867cfc 2024-11-23T13:21:53,318 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/c4b3f6d6837b45639b1f7a44e9867cfc, entries=150, sequenceid=399, filesize=30.5 K 2024-11-23T13:21:53,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/0412727594a04d7fa93267c89970e120 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/0412727594a04d7fa93267c89970e120 2024-11-23T13:21:53,322 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/0412727594a04d7fa93267c89970e120, entries=150, sequenceid=399, filesize=12.0 K 2024-11-23T13:21:53,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/024d40b0a7f74a948b9795a3b29e3f3f as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/024d40b0a7f74a948b9795a3b29e3f3f 2024-11-23T13:21:53,325 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/024d40b0a7f74a948b9795a3b29e3f3f, entries=150, sequenceid=399, filesize=12.0 K 2024-11-23T13:21:53,326 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 329ab862a28643091a2def94193b04dc in 1273ms, sequenceid=399, compaction requested=true 2024-11-23T13:21:53,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2538): Flush status journal for 329ab862a28643091a2def94193b04dc: 2024-11-23T13:21:53,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 
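The RegionTooBusyException warnings above show writers being rejected while the region's memstore is over its 512.0 K blocking limit, until the flush that completes here releases the pressure. For reference, a minimal client-side sketch of a put that backs off on that exception, assuming the stock HBase Java client API; the row, family and qualifier mirror the test_row_0/A:col10 keys in the log, while the retry count and sleep are arbitrary, and in practice the client's own retry layer may wrap the exception rather than surface it directly:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // Retry a few times with backoff if the region reports it is over its memstore limit.
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          break;
        } catch (RegionTooBusyException e) {
          if (attempt >= 5) throw e;      // give up after a handful of attempts
          Thread.sleep(200L * attempt);   // simple linear backoff while the flush catches up
        }
      }
    }
  }
}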
2024-11-23T13:21:53,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=60 2024-11-23T13:21:53,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4106): Remote procedure done, pid=60 2024-11-23T13:21:53,328 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=60, resume processing ppid=59 2024-11-23T13:21:53,328 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, ppid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4260 sec 2024-11-23T13:21:53,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 329ab862a28643091a2def94193b04dc 2024-11-23T13:21:53,329 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 329ab862a28643091a2def94193b04dc 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-23T13:21:53,329 DEBUG [Thread-769 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5c820ef9 to 127.0.0.1:51875 2024-11-23T13:21:53,329 DEBUG [Thread-769 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:21:53,329 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=A 2024-11-23T13:21:53,329 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:53,329 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=B 2024-11-23T13:21:53,329 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:53,329 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=C 2024-11-23T13:21:53,329 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:53,330 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees in 1.4310 sec 2024-11-23T13:21:53,330 DEBUG [Thread-773 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x42e904d8 to 127.0.0.1:51875 2024-11-23T13:21:53,330 DEBUG [Thread-773 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:21:53,331 DEBUG [Thread-771 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0b44b1e5 to 127.0.0.1:51875 2024-11-23T13:21:53,331 DEBUG [Thread-771 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:21:53,335 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123c3094c7de3b5450d9c4a1d606dd7ff32_329ab862a28643091a2def94193b04dc is 50, key is test_row_0/A:col10/1732368112210/Put/seqid=0 2024-11-23T13:21:53,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742093_1269 (size=12454) 2024-11-23T13:21:53,739 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:53,743 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123c3094c7de3b5450d9c4a1d606dd7ff32_329ab862a28643091a2def94193b04dc to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123c3094c7de3b5450d9c4a1d606dd7ff32_329ab862a28643091a2def94193b04dc 2024-11-23T13:21:53,743 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/7acd715bee604ca18b77f834530173f7, store: [table=TestAcidGuarantees family=A region=329ab862a28643091a2def94193b04dc] 2024-11-23T13:21:53,744 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/7acd715bee604ca18b77f834530173f7 is 175, key is test_row_0/A:col10/1732368112210/Put/seqid=0 2024-11-23T13:21:53,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742094_1270 (size=31255) 2024-11-23T13:21:54,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-23T13:21:54,004 INFO [Thread-777 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 59 completed 2024-11-23T13:21:54,148 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=412, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/7acd715bee604ca18b77f834530173f7 2024-11-23T13:21:54,155 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/d70893d9793b4ce089c8c858760834eb is 50, key is test_row_0/B:col10/1732368112210/Put/seqid=0 2024-11-23T13:21:54,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742095_1271 (size=12301) 2024-11-23T13:21:54,560 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=412 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/d70893d9793b4ce089c8c858760834eb 2024-11-23T13:21:54,566 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/a0299015911b493eb604a067e8d925db is 50, key is test_row_0/C:col10/1732368112210/Put/seqid=0 2024-11-23T13:21:54,569 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742096_1272 (size=12301) 2024-11-23T13:21:54,970 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=412 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/a0299015911b493eb604a067e8d925db 2024-11-23T13:21:54,974 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/7acd715bee604ca18b77f834530173f7 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/7acd715bee604ca18b77f834530173f7 2024-11-23T13:21:54,978 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/7acd715bee604ca18b77f834530173f7, entries=150, sequenceid=412, filesize=30.5 K 2024-11-23T13:21:54,979 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/d70893d9793b4ce089c8c858760834eb as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/d70893d9793b4ce089c8c858760834eb 2024-11-23T13:21:54,982 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/d70893d9793b4ce089c8c858760834eb, entries=150, sequenceid=412, filesize=12.0 K 2024-11-23T13:21:54,983 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/a0299015911b493eb604a067e8d925db as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/a0299015911b493eb604a067e8d925db 2024-11-23T13:21:54,986 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/a0299015911b493eb604a067e8d925db, entries=150, sequenceid=412, filesize=12.0 K 2024-11-23T13:21:54,987 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=13.42 KB/13740 for 329ab862a28643091a2def94193b04dc in 1658ms, sequenceid=412, compaction requested=true 2024-11-23T13:21:54,987 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 329ab862a28643091a2def94193b04dc: 2024-11-23T13:21:54,987 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 329ab862a28643091a2def94193b04dc:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T13:21:54,988 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:54,988 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 329ab862a28643091a2def94193b04dc:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T13:21:54,988 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:54,988 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 0 compacting, 6 eligible, 16 blocking 2024-11-23T13:21:54,988 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 329ab862a28643091a2def94193b04dc:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T13:21:54,988 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 0 compacting, 6 eligible, 16 blocking 2024-11-23T13:21:54,988 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:21:54,989 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 6 files of size 74556 starting at candidate #0 after considering 10 permutations with 10 in ratio 2024-11-23T13:21:54,989 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 6 files of size 188280 starting at candidate #0 after considering 10 permutations with 10 in ratio 2024-11-23T13:21:54,989 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 329ab862a28643091a2def94193b04dc/B is initiating minor compaction (all files) 2024-11-23T13:21:54,989 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): 329ab862a28643091a2def94193b04dc/A is initiating minor compaction (all files) 2024-11-23T13:21:54,989 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 329ab862a28643091a2def94193b04dc/B in TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:54,989 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 329ab862a28643091a2def94193b04dc/A in TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 
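The entries above show MemStoreFlusher marking stores A, B and C for compaction and the exploring policy selecting all six eligible store files for a minor compaction. How many files accumulate before such a selection fires is governed by server-side settings such as hbase.hstore.compaction.min and hbase.hstore.compaction.max (assumed defaults here, not read from this test's configuration). A compaction can also be requested explicitly through the Admin API; a minimal sketch, assuming the stock HBase Java client:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestCompactionExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      admin.compact(table);       // queue a (minor) compaction, like the one selected above
      admin.majorCompact(table);  // or force a major compaction of all store files
    }
  }
}

Both calls are asynchronous: the request is queued on the region server's CompactSplit threads, much like the flusher-driven requests logged here.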
2024-11-23T13:21:54,990 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/a32973da69944cf5b4e6217442b34b93, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/dae05d9ac7c84318ba301f72e756fc96, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/b24def592af344abb9684778f696f9da, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/f954af48f69447eb981da5bf3483187a, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/0412727594a04d7fa93267c89970e120, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/d70893d9793b4ce089c8c858760834eb] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp, totalSize=72.8 K 2024-11-23T13:21:54,990 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/e465148875794b66824178f55066ac78, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/5eb13291a01743cd85051ef57616ee75, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/c51618dc31294209aaa277aac90beb8b, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/6d337b55ba7f461a8673efe6699c6597, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/c4b3f6d6837b45639b1f7a44e9867cfc, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/7acd715bee604ca18b77f834530173f7] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp, totalSize=183.9 K 2024-11-23T13:21:54,990 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=10 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:54,990 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 
files: [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/e465148875794b66824178f55066ac78, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/5eb13291a01743cd85051ef57616ee75, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/c51618dc31294209aaa277aac90beb8b, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/6d337b55ba7f461a8673efe6699c6597, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/c4b3f6d6837b45639b1f7a44e9867cfc, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/7acd715bee604ca18b77f834530173f7] 2024-11-23T13:21:54,990 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting a32973da69944cf5b4e6217442b34b93, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=322, earliestPutTs=1732368106362 2024-11-23T13:21:54,990 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting e465148875794b66824178f55066ac78, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=322, earliestPutTs=1732368106362 2024-11-23T13:21:54,990 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting dae05d9ac7c84318ba301f72e756fc96, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1732368106678 2024-11-23T13:21:54,990 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5eb13291a01743cd85051ef57616ee75, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1732368106678 2024-11-23T13:21:54,991 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting b24def592af344abb9684778f696f9da, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=362, earliestPutTs=1732368107821 2024-11-23T13:21:54,991 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting f954af48f69447eb981da5bf3483187a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=375, earliestPutTs=1732368108939 2024-11-23T13:21:54,991 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting c51618dc31294209aaa277aac90beb8b, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=362, earliestPutTs=1732368107821 2024-11-23T13:21:54,991 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 0412727594a04d7fa93267c89970e120, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=399, earliestPutTs=1732368110081 2024-11-23T13:21:54,991 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6d337b55ba7f461a8673efe6699c6597, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=375, earliestPutTs=1732368108939 2024-11-23T13:21:54,991 DEBUG 
[RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting d70893d9793b4ce089c8c858760834eb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=412, earliestPutTs=1732368112210 2024-11-23T13:21:54,992 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting c4b3f6d6837b45639b1f7a44e9867cfc, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=399, earliestPutTs=1732368110081 2024-11-23T13:21:54,992 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7acd715bee604ca18b77f834530173f7, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=412, earliestPutTs=1732368112210 2024-11-23T13:21:55,004 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 329ab862a28643091a2def94193b04dc#B#compaction#227 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:55,004 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/89911876c31043c69ec5d700aa74b4b2 is 50, key is test_row_0/B:col10/1732368112210/Put/seqid=0 2024-11-23T13:21:55,012 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=329ab862a28643091a2def94193b04dc] 2024-11-23T13:21:55,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742097_1273 (size=13255) 2024-11-23T13:21:55,016 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411235b6b6c4d7ae94e30bc47e54d254715af_329ab862a28643091a2def94193b04dc store=[table=TestAcidGuarantees family=A region=329ab862a28643091a2def94193b04dc] 2024-11-23T13:21:55,021 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/89911876c31043c69ec5d700aa74b4b2 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/89911876c31043c69ec5d700aa74b4b2 2024-11-23T13:21:55,027 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 6 (all) file(s) in 329ab862a28643091a2def94193b04dc/B of 329ab862a28643091a2def94193b04dc into 89911876c31043c69ec5d700aa74b4b2(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
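The compaction just logged rewrote six B-store files totalling 72.8 K into a single 12.9 K HFile. One way to confirm the resulting file layout is to list the column-family directory directly on HDFS; a small sketch using the Hadoop FileSystem API, with the region/family path taken from the log above (it is specific to this test run and would differ on another cluster):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListStoreFilesExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Region/family directory copied from the log above; substitute your own cluster's layout.
    Path familyDir = new Path("hdfs://localhost:34115/user/jenkins/test-data/"
        + "4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/"
        + "329ab862a28643091a2def94193b04dc/B");
    FileSystem fs = familyDir.getFileSystem(conf);
    for (FileStatus status : fs.listStatus(familyDir)) {
      // After the compaction logged above, a single ~12.9 K HFile is expected here.
      System.out.println(status.getPath().getName() + " " + status.getLen() + " bytes");
    }
  }
}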
2024-11-23T13:21:55,027 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 329ab862a28643091a2def94193b04dc: 2024-11-23T13:21:55,027 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc., storeName=329ab862a28643091a2def94193b04dc/B, priority=10, startTime=1732368114988; duration=0sec 2024-11-23T13:21:55,027 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:21:55,028 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 329ab862a28643091a2def94193b04dc:B 2024-11-23T13:21:55,028 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 0 compacting, 6 eligible, 16 blocking 2024-11-23T13:21:55,030 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 6 files of size 74522 starting at candidate #0 after considering 10 permutations with 10 in ratio 2024-11-23T13:21:55,030 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 329ab862a28643091a2def94193b04dc/C is initiating minor compaction (all files) 2024-11-23T13:21:55,030 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 329ab862a28643091a2def94193b04dc/C in TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:55,030 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/c3cf2cf96e464fc5beac5e85c6a1739a, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/cdbc57cf014143148faec6b063e4e00a, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/b901411096a447ac9a6d39c48a80210d, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/cec8ddfa8de548b0a966b3c5d4cde96e, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/024d40b0a7f74a948b9795a3b29e3f3f, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/a0299015911b493eb604a067e8d925db] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp, totalSize=72.8 K 2024-11-23T13:21:55,030 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting c3cf2cf96e464fc5beac5e85c6a1739a, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=322, earliestPutTs=1732368106362 2024-11-23T13:21:55,031 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): 
Compacting cdbc57cf014143148faec6b063e4e00a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1732368106678 2024-11-23T13:21:55,031 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting b901411096a447ac9a6d39c48a80210d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=362, earliestPutTs=1732368107821 2024-11-23T13:21:55,031 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting cec8ddfa8de548b0a966b3c5d4cde96e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=375, earliestPutTs=1732368108939 2024-11-23T13:21:55,032 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 024d40b0a7f74a948b9795a3b29e3f3f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=399, earliestPutTs=1732368110081 2024-11-23T13:21:55,032 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting a0299015911b493eb604a067e8d925db, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=412, earliestPutTs=1732368112210 2024-11-23T13:21:55,055 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 329ab862a28643091a2def94193b04dc#C#compaction#229 average throughput is 0.66 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:55,055 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/2e642ae1a7b049298ce8aeb8ddf4d476 is 50, key is test_row_0/C:col10/1732368112210/Put/seqid=0 2024-11-23T13:21:55,057 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411235b6b6c4d7ae94e30bc47e54d254715af_329ab862a28643091a2def94193b04dc, store=[table=TestAcidGuarantees family=A region=329ab862a28643091a2def94193b04dc] 2024-11-23T13:21:55,058 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411235b6b6c4d7ae94e30bc47e54d254715af_329ab862a28643091a2def94193b04dc because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=329ab862a28643091a2def94193b04dc] 2024-11-23T13:21:55,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742098_1274 (size=13221) 2024-11-23T13:21:55,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742099_1275 (size=4469) 2024-11-23T13:21:55,065 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 329ab862a28643091a2def94193b04dc#A#compaction#228 average throughput is 0.46 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:21:55,066 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/81518a0a3bed4aa491bc12f2efe47581 is 175, key is test_row_0/A:col10/1732368112210/Put/seqid=0 2024-11-23T13:21:55,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742100_1276 (size=32209) 2024-11-23T13:21:55,076 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/81518a0a3bed4aa491bc12f2efe47581 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/81518a0a3bed4aa491bc12f2efe47581 2024-11-23T13:21:55,081 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 6 (all) file(s) in 329ab862a28643091a2def94193b04dc/A of 329ab862a28643091a2def94193b04dc into 81518a0a3bed4aa491bc12f2efe47581(size=31.5 K), total size for store is 31.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:21:55,082 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 329ab862a28643091a2def94193b04dc: 2024-11-23T13:21:55,082 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc., storeName=329ab862a28643091a2def94193b04dc/A, priority=10, startTime=1732368114987; duration=0sec 2024-11-23T13:21:55,082 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:55,082 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 329ab862a28643091a2def94193b04dc:A 2024-11-23T13:21:55,423 DEBUG [Thread-767 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7cae6c5c to 127.0.0.1:51875 2024-11-23T13:21:55,423 DEBUG [Thread-767 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:21:55,464 DEBUG [Thread-775 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0a4c53ed to 127.0.0.1:51875 2024-11-23T13:21:55,464 DEBUG [Thread-775 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:21:55,465 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/2e642ae1a7b049298ce8aeb8ddf4d476 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/2e642ae1a7b049298ce8aeb8ddf4d476 2024-11-23T13:21:55,465 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers:
2024-11-23T13:21:55,465 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 32
2024-11-23T13:21:55,465 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 87
2024-11-23T13:21:55,465 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 90
2024-11-23T13:21:55,465 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 91
2024-11-23T13:21:55,465 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 29
2024-11-23T13:21:55,465 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers:
2024-11-23T13:21:55,465 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7202
2024-11-23T13:21:55,465 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6983
2024-11-23T13:21:55,465 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners:
2024-11-23T13:21:55,465 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3043
2024-11-23T13:21:55,465 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9129 rows
2024-11-23T13:21:55,465 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2992
2024-11-23T13:21:55,465 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8976 rows
2024-11-23T13:21:55,465 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-11-23T13:21:55,465 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7a9b9802 to 127.0.0.1:51875
2024-11-23T13:21:55,465 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-23T13:21:55,470 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees
2024-11-23T13:21:55,470 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees
2024-11-23T13:21:55,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=61, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees
2024-11-23T13:21:55,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61
2024-11-23T13:21:55,475 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 6 (all) file(s) in 329ab862a28643091a2def94193b04dc/C of 329ab862a28643091a2def94193b04dc into 2e642ae1a7b049298ce8aeb8ddf4d476(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute.
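With the tool's write/read/scan summary printed, the client begins tearing the table down: HBaseAdmin starts a disable of TestAcidGuarantees and the master stores DisableTableProcedure pid=61. A minimal sketch of the same teardown from the Admin API, assuming the stock HBase Java client; the delete step that usually follows a disable is not shown in this excerpt:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      if (admin.isTableEnabled(table)) {
        admin.disableTable(table);  // drives the DisableTableProcedure seen in the log
      }
      // A full teardown would typically continue with admin.deleteTable(table);
      // that step does not appear in this excerpt.
    }
  }
}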
2024-11-23T13:21:55,475 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 329ab862a28643091a2def94193b04dc: 2024-11-23T13:21:55,475 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc., storeName=329ab862a28643091a2def94193b04dc/C, priority=10, startTime=1732368114988; duration=0sec 2024-11-23T13:21:55,475 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732368115475"}]},"ts":"1732368115475"} 2024-11-23T13:21:55,475 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:21:55,475 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 329ab862a28643091a2def94193b04dc:C 2024-11-23T13:21:55,476 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-23T13:21:55,479 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-23T13:21:55,479 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=62, ppid=61, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-23T13:21:55,480 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=63, ppid=62, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=329ab862a28643091a2def94193b04dc, UNASSIGN}] 2024-11-23T13:21:55,481 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=63, ppid=62, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=329ab862a28643091a2def94193b04dc, UNASSIGN 2024-11-23T13:21:55,481 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=63 updating hbase:meta row=329ab862a28643091a2def94193b04dc, regionState=CLOSING, regionLocation=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:55,482 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T13:21:55,482 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE; CloseRegionProcedure 329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317}] 2024-11-23T13:21:55,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-23T13:21:55,633 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:55,633 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] handler.UnassignRegionHandler(124): Close 329ab862a28643091a2def94193b04dc 2024-11-23T13:21:55,633 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-23T13:21:55,634 DEBUG 
[RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(1681): Closing 329ab862a28643091a2def94193b04dc, disabling compactions & flushes 2024-11-23T13:21:55,634 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:55,634 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:55,634 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. after waiting 0 ms 2024-11-23T13:21:55,634 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:55,634 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(2837): Flushing 329ab862a28643091a2def94193b04dc 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-23T13:21:55,634 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=A 2024-11-23T13:21:55,634 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:55,634 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=B 2024-11-23T13:21:55,634 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:55,634 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 329ab862a28643091a2def94193b04dc, store=C 2024-11-23T13:21:55,634 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:55,640 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411236ec4ef08b60043c3976030e2619dfdd6_329ab862a28643091a2def94193b04dc is 50, key is test_row_0/A:col10/1732368115463/Put/seqid=0 2024-11-23T13:21:55,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742101_1277 (size=12454) 2024-11-23T13:21:55,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-23T13:21:56,045 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 
{event_type=M_RS_CLOSE_REGION, pid=64}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:56,049 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411236ec4ef08b60043c3976030e2619dfdd6_329ab862a28643091a2def94193b04dc to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411236ec4ef08b60043c3976030e2619dfdd6_329ab862a28643091a2def94193b04dc 2024-11-23T13:21:56,050 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/74ce349ac32b46b885bd0fd1494f6cdb, store: [table=TestAcidGuarantees family=A region=329ab862a28643091a2def94193b04dc] 2024-11-23T13:21:56,050 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/74ce349ac32b46b885bd0fd1494f6cdb is 175, key is test_row_0/A:col10/1732368115463/Put/seqid=0 2024-11-23T13:21:56,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742102_1278 (size=31255) 2024-11-23T13:21:56,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-23T13:21:56,455 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=422, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/74ce349ac32b46b885bd0fd1494f6cdb 2024-11-23T13:21:56,462 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/99e7c0c0fda1490d83efbb70e8bc8aff is 50, key is test_row_0/B:col10/1732368115463/Put/seqid=0 2024-11-23T13:21:56,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742103_1279 (size=12301) 2024-11-23T13:21:56,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-23T13:21:56,866 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=422 (bloomFilter=true), 
to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/99e7c0c0fda1490d83efbb70e8bc8aff 2024-11-23T13:21:56,872 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/8376333834af4c379245d78a8d952018 is 50, key is test_row_0/C:col10/1732368115463/Put/seqid=0 2024-11-23T13:21:56,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742104_1280 (size=12301) 2024-11-23T13:21:57,276 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=422 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/8376333834af4c379245d78a8d952018 2024-11-23T13:21:57,281 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/A/74ce349ac32b46b885bd0fd1494f6cdb as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/74ce349ac32b46b885bd0fd1494f6cdb 2024-11-23T13:21:57,284 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/74ce349ac32b46b885bd0fd1494f6cdb, entries=150, sequenceid=422, filesize=30.5 K 2024-11-23T13:21:57,285 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/B/99e7c0c0fda1490d83efbb70e8bc8aff as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/99e7c0c0fda1490d83efbb70e8bc8aff 2024-11-23T13:21:57,288 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/99e7c0c0fda1490d83efbb70e8bc8aff, entries=150, sequenceid=422, filesize=12.0 K 2024-11-23T13:21:57,289 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/.tmp/C/8376333834af4c379245d78a8d952018 as 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/8376333834af4c379245d78a8d952018 2024-11-23T13:21:57,292 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/8376333834af4c379245d78a8d952018, entries=150, sequenceid=422, filesize=12.0 K 2024-11-23T13:21:57,293 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 329ab862a28643091a2def94193b04dc in 1659ms, sequenceid=422, compaction requested=false 2024-11-23T13:21:57,293 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/9c11251b964941bbb4b14ec8c6a16332, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/f1bc792fd82a45caa3276205ef6ea27a, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/8903b8263a934fba936b19de9c4d569e, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/2841fd4a0924464488a024de3be045c5, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/48e8745c33ab4a0e81e0c3f9bdc7b386, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/38db820cfb8347ef8c2e7a43a5ba2cbc, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/a076e5e45972414b865ee51f27682fd4, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/e52944402fab4a49b330eb99d44d9266, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/24a6d7758ea741628d2f627cc6961319, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/d03f117e44974c54b61fa24844227889, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/1fed186fc19d4be19fbd9f87ecfddfbd, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/cd0958482dbd47d4b68a2c9fac6ff415, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/cd7605ca831247c39fcdcc14e3bee9d3, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/ba77b3abad3f44428c4eac5427ff0af4, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/d22396ec3b2045b78d635d6066d0acd7, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/ba3c825e25204d9ca62952e4ee3ac299, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/dfaa0c177787464f9ebffa861cba3a1c, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/d9195b128b0b4e2d8ab6f3efd80a649d, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/10c84d14f46141b4b4cb2918362ccdba, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/62ab31ac3f654d2cafb2858b81117f93, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/a227bb05cc514f31bb501ab58af951f2, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/fa5df624cb3d43b9b541a977c8ca1f27, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/e465148875794b66824178f55066ac78, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/5eb13291a01743cd85051ef57616ee75, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/c51618dc31294209aaa277aac90beb8b, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/6d337b55ba7f461a8673efe6699c6597, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/c4b3f6d6837b45639b1f7a44e9867cfc, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/7acd715bee604ca18b77f834530173f7] to archive 2024-11-23T13:21:57,294 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-23T13:21:57,296 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/9c11251b964941bbb4b14ec8c6a16332 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/9c11251b964941bbb4b14ec8c6a16332 2024-11-23T13:21:57,297 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/f1bc792fd82a45caa3276205ef6ea27a to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/f1bc792fd82a45caa3276205ef6ea27a 2024-11-23T13:21:57,298 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/8903b8263a934fba936b19de9c4d569e to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/8903b8263a934fba936b19de9c4d569e 2024-11-23T13:21:57,299 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/2841fd4a0924464488a024de3be045c5 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/2841fd4a0924464488a024de3be045c5 2024-11-23T13:21:57,300 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/48e8745c33ab4a0e81e0c3f9bdc7b386 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/48e8745c33ab4a0e81e0c3f9bdc7b386 2024-11-23T13:21:57,301 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/38db820cfb8347ef8c2e7a43a5ba2cbc to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/38db820cfb8347ef8c2e7a43a5ba2cbc 2024-11-23T13:21:57,301 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/a076e5e45972414b865ee51f27682fd4 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/a076e5e45972414b865ee51f27682fd4 2024-11-23T13:21:57,302 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/e52944402fab4a49b330eb99d44d9266 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/e52944402fab4a49b330eb99d44d9266 2024-11-23T13:21:57,303 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/24a6d7758ea741628d2f627cc6961319 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/24a6d7758ea741628d2f627cc6961319 2024-11-23T13:21:57,304 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/d03f117e44974c54b61fa24844227889 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/d03f117e44974c54b61fa24844227889 2024-11-23T13:21:57,305 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/1fed186fc19d4be19fbd9f87ecfddfbd to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/1fed186fc19d4be19fbd9f87ecfddfbd 2024-11-23T13:21:57,306 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/cd0958482dbd47d4b68a2c9fac6ff415 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/cd0958482dbd47d4b68a2c9fac6ff415 2024-11-23T13:21:57,307 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/cd7605ca831247c39fcdcc14e3bee9d3 to 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/cd7605ca831247c39fcdcc14e3bee9d3 2024-11-23T13:21:57,308 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/ba77b3abad3f44428c4eac5427ff0af4 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/ba77b3abad3f44428c4eac5427ff0af4 2024-11-23T13:21:57,309 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/d22396ec3b2045b78d635d6066d0acd7 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/d22396ec3b2045b78d635d6066d0acd7 2024-11-23T13:21:57,309 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/ba3c825e25204d9ca62952e4ee3ac299 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/ba3c825e25204d9ca62952e4ee3ac299 2024-11-23T13:21:57,310 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/dfaa0c177787464f9ebffa861cba3a1c to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/dfaa0c177787464f9ebffa861cba3a1c 2024-11-23T13:21:57,311 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/d9195b128b0b4e2d8ab6f3efd80a649d to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/d9195b128b0b4e2d8ab6f3efd80a649d 2024-11-23T13:21:57,312 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/10c84d14f46141b4b4cb2918362ccdba to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/10c84d14f46141b4b4cb2918362ccdba 2024-11-23T13:21:57,313 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/62ab31ac3f654d2cafb2858b81117f93 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/62ab31ac3f654d2cafb2858b81117f93 2024-11-23T13:21:57,314 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/a227bb05cc514f31bb501ab58af951f2 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/a227bb05cc514f31bb501ab58af951f2 2024-11-23T13:21:57,315 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/fa5df624cb3d43b9b541a977c8ca1f27 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/fa5df624cb3d43b9b541a977c8ca1f27 2024-11-23T13:21:57,316 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/e465148875794b66824178f55066ac78 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/e465148875794b66824178f55066ac78 2024-11-23T13:21:57,317 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/5eb13291a01743cd85051ef57616ee75 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/5eb13291a01743cd85051ef57616ee75 2024-11-23T13:21:57,317 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/c51618dc31294209aaa277aac90beb8b to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/c51618dc31294209aaa277aac90beb8b 2024-11-23T13:21:57,318 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/6d337b55ba7f461a8673efe6699c6597 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/6d337b55ba7f461a8673efe6699c6597 2024-11-23T13:21:57,319 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/c4b3f6d6837b45639b1f7a44e9867cfc to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/c4b3f6d6837b45639b1f7a44e9867cfc 2024-11-23T13:21:57,320 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/7acd715bee604ca18b77f834530173f7 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/7acd715bee604ca18b77f834530173f7 2024-11-23T13:21:57,321 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/019accfee80d4b85b9eb4e17c6e2c8c9, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/829b398771ac4abfb3e4d1c785aaac4f, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/8a8a6e14036e4d3e823a0be098350be6, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/2ff7381550ce4f6d91be6e7fc194954c, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/38136ff66cc8455784e3d63afc0f01a4, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/56cfddb2761047a9bda14062d78ebd31, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/f1986abb333b4bf5a2cadaa81ec56781, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/95ebef089cc44a88b01a88d5bf79ee44, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/18f482ecdc934ef68829eb705b5ce56d, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/dfa527b572db4195b5ea59230673a7fc, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/dc69ad943e9c405e93943aa7ebc47a40, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/97282304dd2a41f081ec545c4ee66025, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/325ec228506245cb9044ebec47fb0f21, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/8da1a2c2c12545abaa1d4de3609bda3e, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/4d409054705e45af9bb31929ed38aa4c, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/0b68aa8bdad2418183ace2c31715f6da, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/9f53c6fb1fb9483296879db636920535, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/bb6eb8aa8fa84a13817fbf20bb1a50eb, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/c5eaa23698f64747aa891011be849c92, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/51d598bac790487dacb9a028007ff443, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/dcc91cb677a540d187e89a0aae91eeff, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/a32973da69944cf5b4e6217442b34b93, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/dc6c3c84db814b8e9bdfae19c3ef8755, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/dae05d9ac7c84318ba301f72e756fc96, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/b24def592af344abb9684778f696f9da, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/f954af48f69447eb981da5bf3483187a, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/0412727594a04d7fa93267c89970e120, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/d70893d9793b4ce089c8c858760834eb] to archive 2024-11-23T13:21:57,322 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-23T13:21:57,324 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/019accfee80d4b85b9eb4e17c6e2c8c9 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/019accfee80d4b85b9eb4e17c6e2c8c9 2024-11-23T13:21:57,324 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/829b398771ac4abfb3e4d1c785aaac4f to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/829b398771ac4abfb3e4d1c785aaac4f 2024-11-23T13:21:57,325 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/8a8a6e14036e4d3e823a0be098350be6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/8a8a6e14036e4d3e823a0be098350be6 2024-11-23T13:21:57,326 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/2ff7381550ce4f6d91be6e7fc194954c to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/2ff7381550ce4f6d91be6e7fc194954c 2024-11-23T13:21:57,327 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/38136ff66cc8455784e3d63afc0f01a4 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/38136ff66cc8455784e3d63afc0f01a4 2024-11-23T13:21:57,328 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/56cfddb2761047a9bda14062d78ebd31 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/56cfddb2761047a9bda14062d78ebd31 2024-11-23T13:21:57,329 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/f1986abb333b4bf5a2cadaa81ec56781 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/f1986abb333b4bf5a2cadaa81ec56781 2024-11-23T13:21:57,330 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/95ebef089cc44a88b01a88d5bf79ee44 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/95ebef089cc44a88b01a88d5bf79ee44 2024-11-23T13:21:57,331 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/18f482ecdc934ef68829eb705b5ce56d to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/18f482ecdc934ef68829eb705b5ce56d 2024-11-23T13:21:57,332 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/dfa527b572db4195b5ea59230673a7fc to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/dfa527b572db4195b5ea59230673a7fc 2024-11-23T13:21:57,333 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/dc69ad943e9c405e93943aa7ebc47a40 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/dc69ad943e9c405e93943aa7ebc47a40 2024-11-23T13:21:57,334 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/97282304dd2a41f081ec545c4ee66025 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/97282304dd2a41f081ec545c4ee66025 2024-11-23T13:21:57,335 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/325ec228506245cb9044ebec47fb0f21 to 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/325ec228506245cb9044ebec47fb0f21 2024-11-23T13:21:57,336 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/8da1a2c2c12545abaa1d4de3609bda3e to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/8da1a2c2c12545abaa1d4de3609bda3e 2024-11-23T13:21:57,337 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/4d409054705e45af9bb31929ed38aa4c to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/4d409054705e45af9bb31929ed38aa4c 2024-11-23T13:21:57,338 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/0b68aa8bdad2418183ace2c31715f6da to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/0b68aa8bdad2418183ace2c31715f6da 2024-11-23T13:21:57,339 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/9f53c6fb1fb9483296879db636920535 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/9f53c6fb1fb9483296879db636920535 2024-11-23T13:21:57,340 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/bb6eb8aa8fa84a13817fbf20bb1a50eb to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/bb6eb8aa8fa84a13817fbf20bb1a50eb 2024-11-23T13:21:57,342 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/c5eaa23698f64747aa891011be849c92 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/c5eaa23698f64747aa891011be849c92 2024-11-23T13:21:57,343 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/51d598bac790487dacb9a028007ff443 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/51d598bac790487dacb9a028007ff443 2024-11-23T13:21:57,344 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/dcc91cb677a540d187e89a0aae91eeff to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/dcc91cb677a540d187e89a0aae91eeff 2024-11-23T13:21:57,345 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/a32973da69944cf5b4e6217442b34b93 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/a32973da69944cf5b4e6217442b34b93 2024-11-23T13:21:57,346 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/dc6c3c84db814b8e9bdfae19c3ef8755 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/dc6c3c84db814b8e9bdfae19c3ef8755 2024-11-23T13:21:57,347 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/dae05d9ac7c84318ba301f72e756fc96 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/dae05d9ac7c84318ba301f72e756fc96 2024-11-23T13:21:57,348 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/b24def592af344abb9684778f696f9da to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/b24def592af344abb9684778f696f9da 2024-11-23T13:21:57,349 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/f954af48f69447eb981da5bf3483187a to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/f954af48f69447eb981da5bf3483187a 2024-11-23T13:21:57,350 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/0412727594a04d7fa93267c89970e120 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/0412727594a04d7fa93267c89970e120 2024-11-23T13:21:57,351 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/d70893d9793b4ce089c8c858760834eb to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/d70893d9793b4ce089c8c858760834eb 2024-11-23T13:21:57,353 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/d0d3e71fd37a4feea530420e3490185f, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/dc92b4f700d3418aaccd5117fc865d6e, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/8fcb50e3fe4e48d2bf3c1c363bb8fa32, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/4d9790bf543a4edf858c16f4da7246da, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/072aae490f6f4142a8a08943fcfe7843, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/b514bb39a6d84642babf5cde55c8b3f7, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/676ec2d1d58f4ecc8bfc4a45a2e29f5f, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/7bec4db5054747dea0b47a1ee850b442, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/cf7257838e7c47c898ee7e7944ba9479, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/849fd21c65aa4578a9d2544c6f9effb5, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/fdb5deea460748c59cec6e36916e3539, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/77c5427e1c7c480bb53b849a014be1b2, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/531227f6aada45938c99895ca0f12726, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/ca91ec899d96490e9042e01c6da51815, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/fca8e3be2f3f490b8f29e8f63d5d1256, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/e2b3de0a38794a94b6d618d5c5f30234, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/17ae445ffaee46b8bab78ed468539855, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/8586e68680504de9bc272ea9bdf81d88, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/fbdc0953f7854ceda2a489de0e8143f9, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/8687fbf5765f4eed96ac74dc7f00a8a8, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/c3cf2cf96e464fc5beac5e85c6a1739a, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/94e4fce5409b4105a33b7d0b74ac2846, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/cdbc57cf014143148faec6b063e4e00a, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/b901411096a447ac9a6d39c48a80210d, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/cec8ddfa8de548b0a966b3c5d4cde96e, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/024d40b0a7f74a948b9795a3b29e3f3f, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/a0299015911b493eb604a067e8d925db] to archive 2024-11-23T13:21:57,353 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-23T13:21:57,355 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/d0d3e71fd37a4feea530420e3490185f to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/d0d3e71fd37a4feea530420e3490185f 2024-11-23T13:21:57,356 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/dc92b4f700d3418aaccd5117fc865d6e to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/dc92b4f700d3418aaccd5117fc865d6e 2024-11-23T13:21:57,357 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/8fcb50e3fe4e48d2bf3c1c363bb8fa32 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/8fcb50e3fe4e48d2bf3c1c363bb8fa32 2024-11-23T13:21:57,358 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/4d9790bf543a4edf858c16f4da7246da to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/4d9790bf543a4edf858c16f4da7246da 2024-11-23T13:21:57,359 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/072aae490f6f4142a8a08943fcfe7843 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/072aae490f6f4142a8a08943fcfe7843 2024-11-23T13:21:57,360 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/b514bb39a6d84642babf5cde55c8b3f7 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/b514bb39a6d84642babf5cde55c8b3f7 2024-11-23T13:21:57,361 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/676ec2d1d58f4ecc8bfc4a45a2e29f5f to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/676ec2d1d58f4ecc8bfc4a45a2e29f5f 2024-11-23T13:21:57,362 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/7bec4db5054747dea0b47a1ee850b442 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/7bec4db5054747dea0b47a1ee850b442 2024-11-23T13:21:57,363 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/cf7257838e7c47c898ee7e7944ba9479 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/cf7257838e7c47c898ee7e7944ba9479 2024-11-23T13:21:57,364 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/849fd21c65aa4578a9d2544c6f9effb5 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/849fd21c65aa4578a9d2544c6f9effb5 2024-11-23T13:21:57,364 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/fdb5deea460748c59cec6e36916e3539 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/fdb5deea460748c59cec6e36916e3539 2024-11-23T13:21:57,365 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/77c5427e1c7c480bb53b849a014be1b2 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/77c5427e1c7c480bb53b849a014be1b2 2024-11-23T13:21:57,366 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/531227f6aada45938c99895ca0f12726 to 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/531227f6aada45938c99895ca0f12726 2024-11-23T13:21:57,367 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/ca91ec899d96490e9042e01c6da51815 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/ca91ec899d96490e9042e01c6da51815 2024-11-23T13:21:57,368 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/fca8e3be2f3f490b8f29e8f63d5d1256 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/fca8e3be2f3f490b8f29e8f63d5d1256 2024-11-23T13:21:57,369 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/e2b3de0a38794a94b6d618d5c5f30234 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/e2b3de0a38794a94b6d618d5c5f30234 2024-11-23T13:21:57,370 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/17ae445ffaee46b8bab78ed468539855 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/17ae445ffaee46b8bab78ed468539855 2024-11-23T13:21:57,371 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/8586e68680504de9bc272ea9bdf81d88 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/8586e68680504de9bc272ea9bdf81d88 2024-11-23T13:21:57,372 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/fbdc0953f7854ceda2a489de0e8143f9 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/fbdc0953f7854ceda2a489de0e8143f9 2024-11-23T13:21:57,373 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/8687fbf5765f4eed96ac74dc7f00a8a8 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/8687fbf5765f4eed96ac74dc7f00a8a8 2024-11-23T13:21:57,374 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/c3cf2cf96e464fc5beac5e85c6a1739a to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/c3cf2cf96e464fc5beac5e85c6a1739a 2024-11-23T13:21:57,375 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/94e4fce5409b4105a33b7d0b74ac2846 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/94e4fce5409b4105a33b7d0b74ac2846 2024-11-23T13:21:57,376 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/cdbc57cf014143148faec6b063e4e00a to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/cdbc57cf014143148faec6b063e4e00a 2024-11-23T13:21:57,377 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/b901411096a447ac9a6d39c48a80210d to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/b901411096a447ac9a6d39c48a80210d 2024-11-23T13:21:57,378 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/cec8ddfa8de548b0a966b3c5d4cde96e to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/cec8ddfa8de548b0a966b3c5d4cde96e 2024-11-23T13:21:57,378 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/024d40b0a7f74a948b9795a3b29e3f3f to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/024d40b0a7f74a948b9795a3b29e3f3f 2024-11-23T13:21:57,379 DEBUG [StoreCloser-TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/a0299015911b493eb604a067e8d925db to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/a0299015911b493eb604a067e8d925db 2024-11-23T13:21:57,384 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/recovered.edits/425.seqid, newMaxSeqId=425, maxSeqId=4 2024-11-23T13:21:57,385 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc. 2024-11-23T13:21:57,385 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(1635): Region close journal for 329ab862a28643091a2def94193b04dc: 2024-11-23T13:21:57,386 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] handler.UnassignRegionHandler(170): Closed 329ab862a28643091a2def94193b04dc 2024-11-23T13:21:57,387 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=63 updating hbase:meta row=329ab862a28643091a2def94193b04dc, regionState=CLOSED 2024-11-23T13:21:57,389 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=64, resume processing ppid=63 2024-11-23T13:21:57,389 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, ppid=63, state=SUCCESS; CloseRegionProcedure 329ab862a28643091a2def94193b04dc, server=ba2e440802a7,33173,1732368061317 in 1.9050 sec 2024-11-23T13:21:57,390 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=63, resume processing ppid=62 2024-11-23T13:21:57,390 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, ppid=62, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=329ab862a28643091a2def94193b04dc, UNASSIGN in 1.9090 sec 2024-11-23T13:21:57,391 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=62, resume processing ppid=61 2024-11-23T13:21:57,391 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, ppid=61, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.9110 sec 2024-11-23T13:21:57,392 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732368117392"}]},"ts":"1732368117392"} 2024-11-23T13:21:57,393 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-23T13:21:57,396 INFO 
[PEWorker-4 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-23T13:21:57,397 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=61, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.9250 sec 2024-11-23T13:21:57,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-23T13:21:57,578 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 61 completed 2024-11-23T13:21:57,579 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-23T13:21:57,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=65, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T13:21:57,580 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=65, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T13:21:57,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-23T13:21:57,581 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=65, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T13:21:57,583 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc 2024-11-23T13:21:57,585 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A, FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B, FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C, FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/recovered.edits] 2024-11-23T13:21:57,588 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/74ce349ac32b46b885bd0fd1494f6cdb to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/74ce349ac32b46b885bd0fd1494f6cdb 2024-11-23T13:21:57,589 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/81518a0a3bed4aa491bc12f2efe47581 to 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/A/81518a0a3bed4aa491bc12f2efe47581 2024-11-23T13:21:57,591 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/89911876c31043c69ec5d700aa74b4b2 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/89911876c31043c69ec5d700aa74b4b2 2024-11-23T13:21:57,592 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/99e7c0c0fda1490d83efbb70e8bc8aff to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/B/99e7c0c0fda1490d83efbb70e8bc8aff 2024-11-23T13:21:57,594 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/2e642ae1a7b049298ce8aeb8ddf4d476 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/2e642ae1a7b049298ce8aeb8ddf4d476 2024-11-23T13:21:57,595 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/8376333834af4c379245d78a8d952018 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/C/8376333834af4c379245d78a8d952018 2024-11-23T13:21:57,598 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/recovered.edits/425.seqid to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc/recovered.edits/425.seqid 2024-11-23T13:21:57,599 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/329ab862a28643091a2def94193b04dc 2024-11-23T13:21:57,599 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-23T13:21:57,599 DEBUG [PEWorker-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-23T13:21:57,600 DEBUG [PEWorker-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-23T13:21:57,604 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123097321f2acee4b0cac1c74df45847f4b_329ab862a28643091a2def94193b04dc to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123097321f2acee4b0cac1c74df45847f4b_329ab862a28643091a2def94193b04dc 2024-11-23T13:21:57,605 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123147440e7b3f64301a160043e53cad339_329ab862a28643091a2def94193b04dc to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123147440e7b3f64301a160043e53cad339_329ab862a28643091a2def94193b04dc 2024-11-23T13:21:57,606 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411231d18593a8b2b4a3a8d63ccce328b07bb_329ab862a28643091a2def94193b04dc to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411231d18593a8b2b4a3a8d63ccce328b07bb_329ab862a28643091a2def94193b04dc 2024-11-23T13:21:57,607 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411231fce2353da0d4e97af34d132102d4142_329ab862a28643091a2def94193b04dc to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411231fce2353da0d4e97af34d132102d4142_329ab862a28643091a2def94193b04dc 2024-11-23T13:21:57,608 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123214865e159db4a5aa078ae6453e6a607_329ab862a28643091a2def94193b04dc to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123214865e159db4a5aa078ae6453e6a607_329ab862a28643091a2def94193b04dc 2024-11-23T13:21:57,609 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411232d8477f4fbd64f0ea7fe3a7a9d9007d3_329ab862a28643091a2def94193b04dc to 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411232d8477f4fbd64f0ea7fe3a7a9d9007d3_329ab862a28643091a2def94193b04dc 2024-11-23T13:21:57,610 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123310aa13aad46409a82876a9a9694a537_329ab862a28643091a2def94193b04dc to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123310aa13aad46409a82876a9a9694a537_329ab862a28643091a2def94193b04dc 2024-11-23T13:21:57,611 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112335c7114a17fb457bb491cf8e8864de05_329ab862a28643091a2def94193b04dc to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112335c7114a17fb457bb491cf8e8864de05_329ab862a28643091a2def94193b04dc 2024-11-23T13:21:57,612 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411233c0188774fd8468198ac6c4c162173d2_329ab862a28643091a2def94193b04dc to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411233c0188774fd8468198ac6c4c162173d2_329ab862a28643091a2def94193b04dc 2024-11-23T13:21:57,613 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112341680f5446ca4e158afb7cafe7482aa9_329ab862a28643091a2def94193b04dc to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112341680f5446ca4e158afb7cafe7482aa9_329ab862a28643091a2def94193b04dc 2024-11-23T13:21:57,614 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112363287c6123d34edbb43a6c5820c609c4_329ab862a28643091a2def94193b04dc to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112363287c6123d34edbb43a6c5820c609c4_329ab862a28643091a2def94193b04dc 2024-11-23T13:21:57,615 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411236ec4ef08b60043c3976030e2619dfdd6_329ab862a28643091a2def94193b04dc to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411236ec4ef08b60043c3976030e2619dfdd6_329ab862a28643091a2def94193b04dc 2024-11-23T13:21:57,617 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411237da32d3c629c4ddb9df1a12b8f4296e1_329ab862a28643091a2def94193b04dc to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411237da32d3c629c4ddb9df1a12b8f4296e1_329ab862a28643091a2def94193b04dc 2024-11-23T13:21:57,618 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112384e9fab2aee24319a6a5ede647d77aa8_329ab862a28643091a2def94193b04dc to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112384e9fab2aee24319a6a5ede647d77aa8_329ab862a28643091a2def94193b04dc 2024-11-23T13:21:57,619 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112391b5a2527ce94054915ca2db64c5a526_329ab862a28643091a2def94193b04dc to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112391b5a2527ce94054915ca2db64c5a526_329ab862a28643091a2def94193b04dc 2024-11-23T13:21:57,620 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112398f24e24f93f4377b60db95a78fe9d93_329ab862a28643091a2def94193b04dc to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112398f24e24f93f4377b60db95a78fe9d93_329ab862a28643091a2def94193b04dc 2024-11-23T13:21:57,622 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411239c0116b09cb744d6a51075c847b1eef8_329ab862a28643091a2def94193b04dc to 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411239c0116b09cb744d6a51075c847b1eef8_329ab862a28643091a2def94193b04dc 2024-11-23T13:21:57,623 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123b13fbb45273f4ab39585be886360f18c_329ab862a28643091a2def94193b04dc to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123b13fbb45273f4ab39585be886360f18c_329ab862a28643091a2def94193b04dc 2024-11-23T13:21:57,624 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123ba937118838c4d938243ae727d712b3e_329ab862a28643091a2def94193b04dc to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123ba937118838c4d938243ae727d712b3e_329ab862a28643091a2def94193b04dc 2024-11-23T13:21:57,625 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123c0dddec17a644617a931084e9c56534f_329ab862a28643091a2def94193b04dc to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123c0dddec17a644617a931084e9c56534f_329ab862a28643091a2def94193b04dc 2024-11-23T13:21:57,627 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123c3094c7de3b5450d9c4a1d606dd7ff32_329ab862a28643091a2def94193b04dc to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123c3094c7de3b5450d9c4a1d606dd7ff32_329ab862a28643091a2def94193b04dc 2024-11-23T13:21:57,628 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123f682c971c47b4f29844c1d0d72fe7703_329ab862a28643091a2def94193b04dc to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123f682c971c47b4f29844c1d0d72fe7703_329ab862a28643091a2def94193b04dc 2024-11-23T13:21:57,629 DEBUG [PEWorker-3 {}] backup.HFileArchiver(610): Deleted 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-23T13:21:57,631 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=65, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T13:21:57,634 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-23T13:21:57,636 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-23T13:21:57,637 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=65, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T13:21:57,637 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-23T13:21:57,637 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732368117637"}]},"ts":"9223372036854775807"} 2024-11-23T13:21:57,639 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-23T13:21:57,639 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 329ab862a28643091a2def94193b04dc, NAME => 'TestAcidGuarantees,,1732368090011.329ab862a28643091a2def94193b04dc.', STARTKEY => '', ENDKEY => ''}] 2024-11-23T13:21:57,639 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
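[Editor's note] The procedure chain logged above (DisableTableProcedure pid=61 followed by DeleteTableProcedure pid=65, with HFileArchiver moving every store file under archive/ before the region directory is deleted) is driven from the test client side. Below is a minimal sketch of that client sequence, assuming a standard HBase 2.x Admin API; the class name and connection setup are illustrative and not taken from the test code itself.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTestTable {
  public static void main(String[] args) throws Exception {
    // Equivalent of the DISABLE + DELETE sequence the "Time-limited test" thread issues above.
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      if (admin.tableExists(table)) {
        if (admin.isTableEnabled(table)) {
          admin.disableTable(table); // drives a DisableTableProcedure (pid=61 in the log)
        }
        admin.deleteTable(table);    // drives a DeleteTableProcedure (pid=65 in the log)
      }
    }
  }
}
```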
2024-11-23T13:21:57,639 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732368117639"}]},"ts":"9223372036854775807"} 2024-11-23T13:21:57,640 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-23T13:21:57,643 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=65, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T13:21:57,643 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 64 msec 2024-11-23T13:21:57,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-23T13:21:57,681 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 65 completed 2024-11-23T13:21:57,691 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMobMixedAtomicity Thread=247 (was 240) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2126048790_22 at /127.0.0.1:38100 [Waiting for operation #148] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x1ab0a33d-shared-pool-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-953829615_22 at /127.0.0.1:38076 [Waiting for operation #214] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-953829615_22 at /127.0.0.1:58224 [Waiting for operation #588] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
DataXceiver for client DFSClient_NONMAPREDUCE_2126048790_22 at /127.0.0.1:40360 [Waiting for operation #602] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x1ab0a33d-shared-pool-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/342937a9-dda7-d088-0c8f-015e3d805ddd/cluster_c9da1ea8-f465-6e7b-696b-5e558e5f7ca2/dfs/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/342937a9-dda7-d088-0c8f-015e3d805ddd/cluster_c9da1ea8-f465-6e7b-696b-5e558e5f7ca2/dfs/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x1ab0a33d-shared-pool-11 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x1ab0a33d-shared-pool-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=462 (was 461) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=351 (was 378), ProcessCount=11 (was 11), AvailableMemoryMB=3751 (was 3839) 2024-11-23T13:21:57,700 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testGetAtomicity Thread=247, OpenFileDescriptor=462, MaxFileDescriptor=1048576, SystemLoadAverage=351, ProcessCount=11, AvailableMemoryMB=3750 2024-11-23T13:21:57,701 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
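[Editor's note] The WARN from TableDescriptorChecker above flags a MEMSTORE_FLUSHSIZE of 131072 bytes (128 KB), presumably set deliberately by the test to force frequent flushes. A minimal sketch of how such a value would be placed on a table descriptor, assuming the HBase 2.x builder API; the class and method of delivery are illustrative, the config key is the one quoted in the warning.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class SmallFlushSizeDescriptor {
  // Builds a descriptor whose MEMSTORE_FLUSHSIZE matches the 131072-byte value the
  // checker warns about; the cluster-wide equivalent would be the
  // "hbase.hregion.memstore.flush.size" configuration property named in the warning.
  public static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setMemStoreFlushSize(128 * 1024L) // 131072 bytes, the flagged value
        .build();
  }
}
```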
2024-11-23T13:21:57,701 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T13:21:57,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=66, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-23T13:21:57,703 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=66, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T13:21:57,703 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:57,703 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=66, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T13:21:57,703 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 66 2024-11-23T13:21:57,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=66 2024-11-23T13:21:57,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742105_1281 (size=960) 2024-11-23T13:21:57,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=66 2024-11-23T13:21:58,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=66 2024-11-23T13:21:58,110 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7 2024-11-23T13:21:58,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742106_1282 (size=53) 2024-11-23T13:21:58,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=66 2024-11-23T13:21:58,516 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T13:21:58,517 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing be94e75499ad728566c6b2d31ebaacd4, disabling compactions & flushes 2024-11-23T13:21:58,517 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:21:58,517 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:21:58,517 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. after waiting 0 ms 2024-11-23T13:21:58,517 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:21:58,517 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 
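[Editor's note] The CREATE request logged above specifies three column families A, B, C with VERSIONS => '1' and BLOCKSIZE => 65536, plus the table-level attribute 'hbase.hregion.compacting.memstore.type' => 'BASIC'. A minimal client-side sketch that would produce an equivalent descriptor and drive the same CreateTableProcedure (pid=66), assuming the HBase 2.x client API; the class name and connection setup are illustrative.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestAcidGuaranteesTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
              // Table-level attribute shown in the logged create request.
              .setValue("hbase.hregion.compacting.memstore.type", "BASIC");
      for (String family : new String[] {"A", "B", "C"}) {
        table.setColumnFamily(ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes(family))
            .setMaxVersions(1)       // VERSIONS => '1' in the logged descriptor
            .setBlocksize(64 * 1024) // BLOCKSIZE => '65536 B (64KB)'
            .build());
      }
      admin.createTable(table.build()); // drives a CreateTableProcedure (pid=66 in the log)
    }
  }
}
```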
2024-11-23T13:21:58,517 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for be94e75499ad728566c6b2d31ebaacd4: 2024-11-23T13:21:58,518 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=66, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T13:21:58,518 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732368118518"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732368118518"}]},"ts":"1732368118518"} 2024-11-23T13:21:58,519 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-23T13:21:58,520 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=66, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T13:21:58,520 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732368118520"}]},"ts":"1732368118520"} 2024-11-23T13:21:58,520 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-23T13:21:58,525 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=67, ppid=66, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=be94e75499ad728566c6b2d31ebaacd4, ASSIGN}] 2024-11-23T13:21:58,526 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=67, ppid=66, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=be94e75499ad728566c6b2d31ebaacd4, ASSIGN 2024-11-23T13:21:58,526 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=67, ppid=66, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=be94e75499ad728566c6b2d31ebaacd4, ASSIGN; state=OFFLINE, location=ba2e440802a7,33173,1732368061317; forceNewPlan=false, retain=false 2024-11-23T13:21:58,677 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=67 updating hbase:meta row=be94e75499ad728566c6b2d31ebaacd4, regionState=OPENING, regionLocation=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:58,678 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=68, ppid=67, state=RUNNABLE; OpenRegionProcedure be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317}] 2024-11-23T13:21:58,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=66 2024-11-23T13:21:58,829 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:21:58,832 INFO [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 
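[Editor's note] The store-opener lines that follow show each family coming up with memstore type=CompactingMemStore and compactor=BASIC, i.e. the in-memory compaction requested by the table attribute above. A minimal sketch of the per-family way to request the same policy, assuming the HBase 2.x descriptor API; the helper class and method name are illustrative.

```java
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class BasicInMemoryCompactionFamily {
  // Per-family alternative to the table-wide 'hbase.hregion.compacting.memstore.type'
  // attribute: ask for a BASIC CompactingMemStore on a single column family.
  public static ColumnFamilyDescriptor family(String name) {
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
        .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
        .build();
  }
}
```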
2024-11-23T13:21:58,833 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(7285): Opening region: {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} 2024-11-23T13:21:58,833 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees be94e75499ad728566c6b2d31ebaacd4 2024-11-23T13:21:58,833 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T13:21:58,833 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(7327): checking encryption for be94e75499ad728566c6b2d31ebaacd4 2024-11-23T13:21:58,833 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(7330): checking classloading for be94e75499ad728566c6b2d31ebaacd4 2024-11-23T13:21:58,834 INFO [StoreOpener-be94e75499ad728566c6b2d31ebaacd4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region be94e75499ad728566c6b2d31ebaacd4 2024-11-23T13:21:58,835 INFO [StoreOpener-be94e75499ad728566c6b2d31ebaacd4-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-23T13:21:58,836 INFO [StoreOpener-be94e75499ad728566c6b2d31ebaacd4-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region be94e75499ad728566c6b2d31ebaacd4 columnFamilyName A 2024-11-23T13:21:58,836 DEBUG [StoreOpener-be94e75499ad728566c6b2d31ebaacd4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:58,836 INFO [StoreOpener-be94e75499ad728566c6b2d31ebaacd4-1 {}] regionserver.HStore(327): Store=be94e75499ad728566c6b2d31ebaacd4/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T13:21:58,836 INFO [StoreOpener-be94e75499ad728566c6b2d31ebaacd4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region be94e75499ad728566c6b2d31ebaacd4 2024-11-23T13:21:58,837 INFO [StoreOpener-be94e75499ad728566c6b2d31ebaacd4-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-23T13:21:58,837 INFO [StoreOpener-be94e75499ad728566c6b2d31ebaacd4-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region be94e75499ad728566c6b2d31ebaacd4 columnFamilyName B 2024-11-23T13:21:58,837 DEBUG [StoreOpener-be94e75499ad728566c6b2d31ebaacd4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:58,838 INFO [StoreOpener-be94e75499ad728566c6b2d31ebaacd4-1 {}] regionserver.HStore(327): Store=be94e75499ad728566c6b2d31ebaacd4/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T13:21:58,838 INFO [StoreOpener-be94e75499ad728566c6b2d31ebaacd4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region be94e75499ad728566c6b2d31ebaacd4 2024-11-23T13:21:58,839 INFO [StoreOpener-be94e75499ad728566c6b2d31ebaacd4-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-23T13:21:58,839 INFO [StoreOpener-be94e75499ad728566c6b2d31ebaacd4-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region be94e75499ad728566c6b2d31ebaacd4 columnFamilyName C 2024-11-23T13:21:58,839 DEBUG [StoreOpener-be94e75499ad728566c6b2d31ebaacd4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:21:58,839 INFO [StoreOpener-be94e75499ad728566c6b2d31ebaacd4-1 {}] regionserver.HStore(327): Store=be94e75499ad728566c6b2d31ebaacd4/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T13:21:58,839 INFO [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:21:58,840 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4 2024-11-23T13:21:58,840 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4 2024-11-23T13:21:58,841 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-23T13:21:58,842 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(1085): writing seq id for be94e75499ad728566c6b2d31ebaacd4 2024-11-23T13:21:58,844 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T13:21:58,844 INFO [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(1102): Opened be94e75499ad728566c6b2d31ebaacd4; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59080576, jitterRate=-0.11963081359863281}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T13:21:58,845 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(1001): Region open journal for be94e75499ad728566c6b2d31ebaacd4: 2024-11-23T13:21:58,845 INFO [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4., pid=68, masterSystemTime=1732368118829 2024-11-23T13:21:58,847 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:21:58,847 INFO [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 
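Note: each store above comes up as a CompactingMemStore with compactor=BASIC and a 2 MB in-memory flush threshold. The table descriptor earlier shows no per-family setting, so the test presumably relies on the site-wide default hbase.hregion.compacting.memstore.type; as a hedged illustration, the same behaviour can also be requested per column family:

import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

class InMemoryCompactionExample {
  /**
   * Sketch: ask for BASIC in-memory compaction on family A; the region server then backs the
   * store with a CompactingMemStore (compactor=BASIC) like the ones opened above.
   */
  static ColumnFamilyDescriptor familyWithBasicCompaction() {
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
        .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
        .build();
  }
}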
2024-11-23T13:21:58,847 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=67 updating hbase:meta row=be94e75499ad728566c6b2d31ebaacd4, regionState=OPEN, openSeqNum=2, regionLocation=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:58,849 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=68, resume processing ppid=67 2024-11-23T13:21:58,849 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, ppid=67, state=SUCCESS; OpenRegionProcedure be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 in 170 msec 2024-11-23T13:21:58,851 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=67, resume processing ppid=66 2024-11-23T13:21:58,851 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, ppid=66, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=be94e75499ad728566c6b2d31ebaacd4, ASSIGN in 324 msec 2024-11-23T13:21:58,851 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=66, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T13:21:58,851 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732368118851"}]},"ts":"1732368118851"} 2024-11-23T13:21:58,852 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-23T13:21:58,854 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=66, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T13:21:58,855 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1530 sec 2024-11-23T13:21:59,719 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
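Note: from here the test opens a series of client connections; each ReadOnlyZKClient connect to 127.0.0.1:51875 paired with an AbstractRpcClient in the entries that follow corresponds to one HBase Connection. A minimal sketch of how such a connection is typically obtained (quorum and port copied from the log, purely illustrative for this mini-cluster):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Table;

class ClientConnectionExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");          // values taken from the log
    conf.set("hbase.zookeeper.property.clientPort", "51875"); // mini-cluster ZooKeeper port
    // Each createConnection() builds its own ZooKeeper and RPC client, which is what
    // produces the ReadOnlyZKClient / AbstractRpcClient pairs logged below.
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // issue Puts / Gets / Scans against the table here
    }
  }
}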
2024-11-23T13:21:59,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=66 2024-11-23T13:21:59,808 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 66 completed 2024-11-23T13:21:59,809 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x58341641 to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@17b6adc5 2024-11-23T13:21:59,813 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a569490, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:21:59,815 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:21:59,816 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37426, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:21:59,817 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-23T13:21:59,818 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59726, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-23T13:21:59,820 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x44645c55 to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@669e1999 2024-11-23T13:21:59,822 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6862e3ce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:21:59,823 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x64ee0130 to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@72aa9ee5 2024-11-23T13:21:59,826 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d296fed, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:21:59,827 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x683b64c3 to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4ec09297 2024-11-23T13:21:59,830 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8d0caa5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:21:59,831 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x07e55eb7 to 
127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4dfb20f6 2024-11-23T13:21:59,834 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@43f04e0e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:21:59,835 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x03a703d2 to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@17cf7fc0 2024-11-23T13:21:59,837 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@560ec309, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:21:59,838 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x78b04266 to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5886c0f2 2024-11-23T13:21:59,841 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@eb04aeb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:21:59,842 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x088aa519 to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@66e575aa 2024-11-23T13:21:59,845 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a0e9c8f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:21:59,846 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5e998dd3 to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@131ceb8f 2024-11-23T13:21:59,849 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d68f787, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:21:59,849 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2e4c79b8 to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5a78bf6d 2024-11-23T13:21:59,852 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@10e6bf6a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:21:59,853 DEBUG [Time-limited test {}] 
zookeeper.ReadOnlyZKClient(149): Connect 0x2d1403c3 to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@328852db 2024-11-23T13:21:59,856 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1730a60f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:21:59,862 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T13:21:59,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=69, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees 2024-11-23T13:21:59,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-23T13:21:59,863 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=69, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T13:21:59,864 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=69, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T13:21:59,864 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=70, ppid=69, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T13:21:59,864 DEBUG [hconnection-0x7ab4e70c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:21:59,865 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37428, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:21:59,868 DEBUG [hconnection-0x596e40a1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:21:59,868 DEBUG [hconnection-0xb98948a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:21:59,869 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37430, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:21:59,869 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37446, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:21:59,884 DEBUG [hconnection-0x3e228f6d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:21:59,884 DEBUG [hconnection-0x7b7d4f2e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:21:59,885 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37458, version=2.7.0-SNAPSHOT, sasl=false, 
ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:21:59,886 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37466, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:21:59,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on be94e75499ad728566c6b2d31ebaacd4 2024-11-23T13:21:59,890 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing be94e75499ad728566c6b2d31ebaacd4 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-23T13:21:59,890 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=A 2024-11-23T13:21:59,891 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:59,891 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=B 2024-11-23T13:21:59,891 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:59,891 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=C 2024-11-23T13:21:59,891 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:21:59,892 DEBUG [hconnection-0x280c8c04-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:21:59,892 DEBUG [hconnection-0x54a88b33-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:21:59,893 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37474, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:21:59,894 DEBUG [hconnection-0x3a4b20eb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:21:59,894 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37480, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:21:59,895 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37486, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:21:59,910 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:59,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37446 deadline: 1732368179908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:59,910 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:59,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37486 deadline: 1732368179909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:59,911 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:59,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37466 deadline: 1732368179909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:59,911 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:59,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37458 deadline: 1732368179910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:59,916 DEBUG [hconnection-0x4b90a49e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:21:59,918 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37492, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:21:59,918 DEBUG [hconnection-0x36f35b87-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:21:59,920 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37506, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:21:59,921 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:21:59,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368179921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:21:59,933 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/b7ce3c9648e84167987d90f7777f2f76 is 50, key is test_row_0/A:col10/1732368119876/Put/seqid=0 2024-11-23T13:21:59,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742107_1283 (size=12001) 2024-11-23T13:21:59,951 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/b7ce3c9648e84167987d90f7777f2f76 2024-11-23T13:21:59,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-23T13:21:59,985 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/7ec8fa4b11f64891996f9c429092f365 is 50, key is test_row_0/B:col10/1732368119876/Put/seqid=0 2024-11-23T13:22:00,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742108_1284 (size=12001) 2024-11-23T13:22:00,013 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:00,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37486 deadline: 1732368180012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:00,013 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:00,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37446 deadline: 1732368180012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:00,013 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:00,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37466 deadline: 1732368180012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:00,014 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:00,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37458 deadline: 1732368180013, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:00,016 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:00,017 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-23T13:22:00,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:00,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. as already flushing 2024-11-23T13:22:00,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:00,017 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
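Note: the repeated WARN/DEBUG pairs above are writers being rejected with RegionTooBusyException because the region is over its memstore blocking limit (512 K here, a test-tuned value) while the flush is still running. The stock client retries these internally; as a rough sketch of equivalent manual handling, with the retry count and sleeps being arbitrary choices:

import java.io.IOException;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

class PutWithBackoff {
  /** Sketch: back off and retry while the region reports it is over the memstore blocking limit. */
  static void putWithBackoff(Table table, Put put) throws IOException, InterruptedException {
    long sleepMs = 100;
    for (int attempt = 0; attempt < 5; attempt++) {
      try {
        table.put(put);
        return;
      } catch (RegionTooBusyException e) {
        Thread.sleep(sleepMs); // give the in-flight flush time to free memstore space
        sleepMs *= 2;
      }
    }
    throw new IOException("put still rejected after retries");
  }
}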
2024-11-23T13:22:00,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:00,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:00,023 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:00,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368180023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:00,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-23T13:22:00,169 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:00,169 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-23T13:22:00,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:00,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. as already flushing 2024-11-23T13:22:00,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 
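Note: the flush requested earlier runs as FlushTableProcedure pid=69 with a FlushRegionProcedure child (pid=70). Because the region server is already mid-flush it answers "NOT flushing ... as already flushing", the remote call fails, and the master re-dispatches pid=70, which is the retry visible here. The client-side trigger is just an Admin flush call, roughly as below (conn is assumed to be an open Connection):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

class FlushTableExample {
  /** Sketch: request a table flush; the master turns this into the procedure pair seen in the log. */
  static void flush(Connection conn) throws java.io.IOException {
    try (Admin admin = conn.getAdmin()) {
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}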
2024-11-23T13:22:00,170 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:00,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:00,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:00,216 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:00,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37458 deadline: 1732368180215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:00,216 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:00,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37466 deadline: 1732368180215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:00,216 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:00,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37486 deadline: 1732368180215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:00,217 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:00,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37446 deadline: 1732368180216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:00,227 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:00,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368180225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:00,322 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:00,323 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-23T13:22:00,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:00,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. as already flushing 2024-11-23T13:22:00,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:00,323 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:00,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:00,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:00,404 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/7ec8fa4b11f64891996f9c429092f365 2024-11-23T13:22:00,426 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/a1ce11ce29bc4f8db1e3b964f6361e74 is 50, key is test_row_0/C:col10/1732368119876/Put/seqid=0 2024-11-23T13:22:00,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742109_1285 (size=12001) 2024-11-23T13:22:00,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-23T13:22:00,475 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:00,476 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-23T13:22:00,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:00,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 
as already flushing 2024-11-23T13:22:00,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:00,476 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:00,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:00,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:00,519 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:00,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37458 deadline: 1732368180518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:00,519 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:00,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37466 deadline: 1732368180518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:00,520 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:00,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37446 deadline: 1732368180519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:00,520 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:00,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37486 deadline: 1732368180519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:00,528 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:00,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368180528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:00,628 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:00,629 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-23T13:22:00,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:00,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. as already flushing 2024-11-23T13:22:00,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:00,629 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:00,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:00,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:00,782 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:00,783 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-23T13:22:00,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:00,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. as already flushing 2024-11-23T13:22:00,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:00,783 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:00,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:00,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:00,831 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/a1ce11ce29bc4f8db1e3b964f6361e74 2024-11-23T13:22:00,839 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/b7ce3c9648e84167987d90f7777f2f76 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/b7ce3c9648e84167987d90f7777f2f76 2024-11-23T13:22:00,843 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/b7ce3c9648e84167987d90f7777f2f76, entries=150, sequenceid=14, filesize=11.7 K 2024-11-23T13:22:00,844 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/7ec8fa4b11f64891996f9c429092f365 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/7ec8fa4b11f64891996f9c429092f365 2024-11-23T13:22:00,848 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/7ec8fa4b11f64891996f9c429092f365, entries=150, sequenceid=14, 
filesize=11.7 K 2024-11-23T13:22:00,849 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/a1ce11ce29bc4f8db1e3b964f6361e74 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/a1ce11ce29bc4f8db1e3b964f6361e74 2024-11-23T13:22:00,853 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/a1ce11ce29bc4f8db1e3b964f6361e74, entries=150, sequenceid=14, filesize=11.7 K 2024-11-23T13:22:00,853 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for be94e75499ad728566c6b2d31ebaacd4 in 963ms, sequenceid=14, compaction requested=false 2024-11-23T13:22:00,853 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-23T13:22:00,854 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for be94e75499ad728566c6b2d31ebaacd4: 2024-11-23T13:22:00,936 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:00,936 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-23T13:22:00,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 
2024-11-23T13:22:00,937 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2837): Flushing be94e75499ad728566c6b2d31ebaacd4 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-23T13:22:00,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=A 2024-11-23T13:22:00,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:00,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=B 2024-11-23T13:22:00,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:00,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=C 2024-11-23T13:22:00,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:00,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/a3a4262ad2664df986b21bc4b5a3379b is 50, key is test_row_0/A:col10/1732368119909/Put/seqid=0 2024-11-23T13:22:00,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742110_1286 (size=12001) 2024-11-23T13:22:00,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-23T13:22:01,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on be94e75499ad728566c6b2d31ebaacd4 2024-11-23T13:22:01,023 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. as already flushing 2024-11-23T13:22:01,031 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:01,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37486 deadline: 1732368181028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:01,031 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:01,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37466 deadline: 1732368181029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:01,033 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:01,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37446 deadline: 1732368181030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:01,035 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:01,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37458 deadline: 1732368181035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:01,039 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:01,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368181038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:01,133 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:01,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37486 deadline: 1732368181132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:01,134 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:01,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37466 deadline: 1732368181132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:01,137 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:01,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37446 deadline: 1732368181136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:01,140 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:01,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37458 deadline: 1732368181139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:01,337 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:01,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37486 deadline: 1732368181336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:01,337 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:01,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37466 deadline: 1732368181336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:01,339 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:01,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37446 deadline: 1732368181338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:01,342 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:01,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37458 deadline: 1732368181342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:01,349 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/a3a4262ad2664df986b21bc4b5a3379b 2024-11-23T13:22:01,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/3656aa6b14a1475793fc8e595b05b49b is 50, key is test_row_0/B:col10/1732368119909/Put/seqid=0 2024-11-23T13:22:01,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742111_1287 (size=12001) 2024-11-23T13:22:01,641 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:01,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37486 deadline: 1732368181639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:01,641 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:01,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37446 deadline: 1732368181640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:01,641 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:01,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37466 deadline: 1732368181640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:01,645 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:01,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37458 deadline: 1732368181644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:01,766 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/3656aa6b14a1475793fc8e595b05b49b 2024-11-23T13:22:01,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/1f30b48f63094b6da57f39306affb2cb is 50, key is test_row_0/C:col10/1732368119909/Put/seqid=0 2024-11-23T13:22:01,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742112_1288 (size=12001) 2024-11-23T13:22:01,792 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/1f30b48f63094b6da57f39306affb2cb 2024-11-23T13:22:01,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/a3a4262ad2664df986b21bc4b5a3379b as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/a3a4262ad2664df986b21bc4b5a3379b 2024-11-23T13:22:01,802 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/a3a4262ad2664df986b21bc4b5a3379b, entries=150, sequenceid=37, filesize=11.7 K 2024-11-23T13:22:01,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/3656aa6b14a1475793fc8e595b05b49b as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/3656aa6b14a1475793fc8e595b05b49b 2024-11-23T13:22:01,808 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/3656aa6b14a1475793fc8e595b05b49b, entries=150, sequenceid=37, filesize=11.7 K 2024-11-23T13:22:01,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/1f30b48f63094b6da57f39306affb2cb as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/1f30b48f63094b6da57f39306affb2cb 2024-11-23T13:22:01,814 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/1f30b48f63094b6da57f39306affb2cb, entries=150, sequenceid=37, filesize=11.7 K 2024-11-23T13:22:01,814 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for be94e75499ad728566c6b2d31ebaacd4 in 877ms, sequenceid=37, compaction requested=false 2024-11-23T13:22:01,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2538): Flush status journal for be94e75499ad728566c6b2d31ebaacd4: 2024-11-23T13:22:01,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 
2024-11-23T13:22:01,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=70 2024-11-23T13:22:01,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4106): Remote procedure done, pid=70 2024-11-23T13:22:01,817 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=70, resume processing ppid=69 2024-11-23T13:22:01,817 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, ppid=69, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9520 sec 2024-11-23T13:22:01,819 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees in 1.9560 sec 2024-11-23T13:22:01,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-23T13:22:01,968 INFO [Thread-1287 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 69 completed 2024-11-23T13:22:01,969 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T13:22:01,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees 2024-11-23T13:22:01,971 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T13:22:01,971 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T13:22:01,972 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T13:22:01,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-23T13:22:02,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on be94e75499ad728566c6b2d31ebaacd4 2024-11-23T13:22:02,046 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing be94e75499ad728566c6b2d31ebaacd4 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-23T13:22:02,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=A 2024-11-23T13:22:02,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:02,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=B 2024-11-23T13:22:02,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-11-23T13:22:02,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=C 2024-11-23T13:22:02,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:02,051 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/b76edfe017a543e18c0963adc92b4063 is 50, key is test_row_0/A:col10/1732368122045/Put/seqid=0 2024-11-23T13:22:02,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742113_1289 (size=9657) 2024-11-23T13:22:02,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-23T13:22:02,086 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:02,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368182085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:02,123 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:02,124 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-23T13:22:02,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 
2024-11-23T13:22:02,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. as already flushing 2024-11-23T13:22:02,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:02,124 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:02,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:22:02,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:02,144 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:02,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37466 deadline: 1732368182142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:02,146 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:02,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37446 deadline: 1732368182144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:02,146 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:02,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37486 deadline: 1732368182145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:02,148 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:02,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37458 deadline: 1732368182147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:02,189 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:02,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368182187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:02,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-23T13:22:02,276 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:02,276 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-23T13:22:02,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:02,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. as already flushing 2024-11-23T13:22:02,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:02,277 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:02,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:02,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:02,393 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:02,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368182392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:02,429 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:02,429 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-23T13:22:02,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:02,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. as already flushing 2024-11-23T13:22:02,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:02,430 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:22:02,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:02,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:02,457 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/b76edfe017a543e18c0963adc92b4063 2024-11-23T13:22:02,464 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/3f88f6e318534d229315caea664160a7 is 50, key is test_row_0/B:col10/1732368122045/Put/seqid=0 2024-11-23T13:22:02,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742114_1290 (size=9657) 2024-11-23T13:22:02,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-23T13:22:02,582 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:02,582 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-23T13:22:02,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:02,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. as already flushing 2024-11-23T13:22:02,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:02,582 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:02,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:02,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:02,696 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:02,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368182694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:02,734 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:02,735 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-23T13:22:02,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:02,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. as already flushing 2024-11-23T13:22:02,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 
2024-11-23T13:22:02,735 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:02,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:02,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:02,880 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/3f88f6e318534d229315caea664160a7 2024-11-23T13:22:02,887 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/554776b22ac349a8a2ae4152d43581bd is 50, key is test_row_0/C:col10/1732368122045/Put/seqid=0 2024-11-23T13:22:02,888 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:02,888 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-23T13:22:02,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:02,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. as already flushing 2024-11-23T13:22:02,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 
2024-11-23T13:22:02,889 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:02,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:02,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:02,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742115_1291 (size=9657) 2024-11-23T13:22:02,912 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-23T13:22:03,041 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:03,041 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-23T13:22:03,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:03,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. as already flushing 2024-11-23T13:22:03,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 
2024-11-23T13:22:03,042 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:03,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:03,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:03,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-23T13:22:03,151 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:03,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37466 deadline: 1732368183149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:03,154 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:03,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37446 deadline: 1732368183152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:03,154 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:03,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37486 deadline: 1732368183152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:03,154 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:03,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37458 deadline: 1732368183153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:03,194 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:03,194 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-23T13:22:03,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:03,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. as already flushing 2024-11-23T13:22:03,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:03,195 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:22:03,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:03,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:03,202 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:03,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368183201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:03,292 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/554776b22ac349a8a2ae4152d43581bd 2024-11-23T13:22:03,298 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/b76edfe017a543e18c0963adc92b4063 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/b76edfe017a543e18c0963adc92b4063 2024-11-23T13:22:03,302 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/b76edfe017a543e18c0963adc92b4063, entries=100, sequenceid=52, filesize=9.4 K 2024-11-23T13:22:03,303 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/3f88f6e318534d229315caea664160a7 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/3f88f6e318534d229315caea664160a7 2024-11-23T13:22:03,307 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/3f88f6e318534d229315caea664160a7, entries=100, sequenceid=52, filesize=9.4 K 2024-11-23T13:22:03,308 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/554776b22ac349a8a2ae4152d43581bd as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/554776b22ac349a8a2ae4152d43581bd 2024-11-23T13:22:03,312 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/554776b22ac349a8a2ae4152d43581bd, entries=100, sequenceid=52, filesize=9.4 K 2024-11-23T13:22:03,313 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for be94e75499ad728566c6b2d31ebaacd4 in 1267ms, sequenceid=52, compaction requested=true 2024-11-23T13:22:03,313 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for be94e75499ad728566c6b2d31ebaacd4: 2024-11-23T13:22:03,313 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store be94e75499ad728566c6b2d31ebaacd4:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T13:22:03,313 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:22:03,313 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:22:03,313 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store be94e75499ad728566c6b2d31ebaacd4:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T13:22:03,313 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:22:03,313 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store be94e75499ad728566c6b2d31ebaacd4:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T13:22:03,313 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:22:03,313 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store 
files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:22:03,314 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:22:03,314 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:22:03,315 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): be94e75499ad728566c6b2d31ebaacd4/A is initiating minor compaction (all files) 2024-11-23T13:22:03,315 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): be94e75499ad728566c6b2d31ebaacd4/B is initiating minor compaction (all files) 2024-11-23T13:22:03,315 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of be94e75499ad728566c6b2d31ebaacd4/A in TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:03,315 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of be94e75499ad728566c6b2d31ebaacd4/B in TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:03,315 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/7ec8fa4b11f64891996f9c429092f365, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/3656aa6b14a1475793fc8e595b05b49b, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/3f88f6e318534d229315caea664160a7] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp, totalSize=32.9 K 2024-11-23T13:22:03,315 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/b7ce3c9648e84167987d90f7777f2f76, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/a3a4262ad2664df986b21bc4b5a3379b, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/b76edfe017a543e18c0963adc92b4063] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp, totalSize=32.9 K 2024-11-23T13:22:03,315 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 7ec8fa4b11f64891996f9c429092f365, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1732368119876 2024-11-23T13:22:03,315 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting b7ce3c9648e84167987d90f7777f2f76, keycount=150, 
bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1732368119876 2024-11-23T13:22:03,316 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting a3a4262ad2664df986b21bc4b5a3379b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732368119907 2024-11-23T13:22:03,316 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 3656aa6b14a1475793fc8e595b05b49b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732368119907 2024-11-23T13:22:03,316 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting b76edfe017a543e18c0963adc92b4063, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732368121029 2024-11-23T13:22:03,316 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 3f88f6e318534d229315caea664160a7, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732368121029 2024-11-23T13:22:03,323 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): be94e75499ad728566c6b2d31ebaacd4#B#compaction#243 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:22:03,324 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/10dc590f52fa45a1943dfdf27c945412 is 50, key is test_row_0/B:col10/1732368122045/Put/seqid=0 2024-11-23T13:22:03,325 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): be94e75499ad728566c6b2d31ebaacd4#A#compaction#242 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:22:03,326 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/0c6d26c9f54a4360908ec1a4634e32d8 is 50, key is test_row_0/A:col10/1732368122045/Put/seqid=0 2024-11-23T13:22:03,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742116_1292 (size=12104) 2024-11-23T13:22:03,347 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:03,347 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-23T13:22:03,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 
2024-11-23T13:22:03,348 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2837): Flushing be94e75499ad728566c6b2d31ebaacd4 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-23T13:22:03,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=A 2024-11-23T13:22:03,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:03,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=B 2024-11-23T13:22:03,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:03,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=C 2024-11-23T13:22:03,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:03,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742117_1293 (size=12104) 2024-11-23T13:22:03,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/3218c5e9e44c4c57be11d78d641d4727 is 50, key is test_row_0/A:col10/1732368122070/Put/seqid=0 2024-11-23T13:22:03,357 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/0c6d26c9f54a4360908ec1a4634e32d8 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/0c6d26c9f54a4360908ec1a4634e32d8 2024-11-23T13:22:03,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742118_1294 (size=12001) 2024-11-23T13:22:03,364 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in be94e75499ad728566c6b2d31ebaacd4/A of be94e75499ad728566c6b2d31ebaacd4 into 0c6d26c9f54a4360908ec1a4634e32d8(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T13:22:03,364 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for be94e75499ad728566c6b2d31ebaacd4: 2024-11-23T13:22:03,364 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4., storeName=be94e75499ad728566c6b2d31ebaacd4/A, priority=13, startTime=1732368123313; duration=0sec 2024-11-23T13:22:03,364 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:22:03,364 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: be94e75499ad728566c6b2d31ebaacd4:A 2024-11-23T13:22:03,364 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:22:03,365 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:22:03,365 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): be94e75499ad728566c6b2d31ebaacd4/C is initiating minor compaction (all files) 2024-11-23T13:22:03,365 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of be94e75499ad728566c6b2d31ebaacd4/C in TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:03,365 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/a1ce11ce29bc4f8db1e3b964f6361e74, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/1f30b48f63094b6da57f39306affb2cb, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/554776b22ac349a8a2ae4152d43581bd] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp, totalSize=32.9 K 2024-11-23T13:22:03,365 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=73 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/3218c5e9e44c4c57be11d78d641d4727 2024-11-23T13:22:03,366 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting a1ce11ce29bc4f8db1e3b964f6361e74, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1732368119876 2024-11-23T13:22:03,366 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1f30b48f63094b6da57f39306affb2cb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, 
seqNum=37, earliestPutTs=1732368119907 2024-11-23T13:22:03,366 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 554776b22ac349a8a2ae4152d43581bd, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732368121029 2024-11-23T13:22:03,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/6d57b3e763b3462d849a590c25019692 is 50, key is test_row_0/B:col10/1732368122070/Put/seqid=0 2024-11-23T13:22:03,378 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): be94e75499ad728566c6b2d31ebaacd4#C#compaction#246 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:22:03,378 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/aae4cea7c977492c97c2f01747b53fd8 is 50, key is test_row_0/C:col10/1732368122045/Put/seqid=0 2024-11-23T13:22:03,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742119_1295 (size=12001) 2024-11-23T13:22:03,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742120_1296 (size=12104) 2024-11-23T13:22:03,744 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/10dc590f52fa45a1943dfdf27c945412 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/10dc590f52fa45a1943dfdf27c945412 2024-11-23T13:22:03,753 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in be94e75499ad728566c6b2d31ebaacd4/B of be94e75499ad728566c6b2d31ebaacd4 into 10dc590f52fa45a1943dfdf27c945412(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T13:22:03,753 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for be94e75499ad728566c6b2d31ebaacd4: 2024-11-23T13:22:03,753 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4., storeName=be94e75499ad728566c6b2d31ebaacd4/B, priority=13, startTime=1732368123313; duration=0sec 2024-11-23T13:22:03,753 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:22:03,753 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: be94e75499ad728566c6b2d31ebaacd4:B 2024-11-23T13:22:03,791 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=73 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/6d57b3e763b3462d849a590c25019692 2024-11-23T13:22:03,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/f619aeaa001341b09f238131e7049a65 is 50, key is test_row_0/C:col10/1732368122070/Put/seqid=0 2024-11-23T13:22:03,803 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/aae4cea7c977492c97c2f01747b53fd8 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/aae4cea7c977492c97c2f01747b53fd8 2024-11-23T13:22:03,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742121_1297 (size=12001) 2024-11-23T13:22:03,808 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in be94e75499ad728566c6b2d31ebaacd4/C of be94e75499ad728566c6b2d31ebaacd4 into aae4cea7c977492c97c2f01747b53fd8(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T13:22:03,808 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for be94e75499ad728566c6b2d31ebaacd4: 2024-11-23T13:22:03,808 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4., storeName=be94e75499ad728566c6b2d31ebaacd4/C, priority=13, startTime=1732368123313; duration=0sec 2024-11-23T13:22:03,808 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:22:03,808 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: be94e75499ad728566c6b2d31ebaacd4:C 2024-11-23T13:22:04,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-23T13:22:04,205 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=73 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/f619aeaa001341b09f238131e7049a65 2024-11-23T13:22:04,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/3218c5e9e44c4c57be11d78d641d4727 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/3218c5e9e44c4c57be11d78d641d4727 2024-11-23T13:22:04,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on be94e75499ad728566c6b2d31ebaacd4 2024-11-23T13:22:04,213 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 
as already flushing 2024-11-23T13:22:04,214 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/3218c5e9e44c4c57be11d78d641d4727, entries=150, sequenceid=73, filesize=11.7 K 2024-11-23T13:22:04,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/6d57b3e763b3462d849a590c25019692 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/6d57b3e763b3462d849a590c25019692 2024-11-23T13:22:04,220 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/6d57b3e763b3462d849a590c25019692, entries=150, sequenceid=73, filesize=11.7 K 2024-11-23T13:22:04,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/f619aeaa001341b09f238131e7049a65 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/f619aeaa001341b09f238131e7049a65 2024-11-23T13:22:04,226 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/f619aeaa001341b09f238131e7049a65, entries=150, sequenceid=73, filesize=11.7 K 2024-11-23T13:22:04,226 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=40.25 KB/41220 for be94e75499ad728566c6b2d31ebaacd4 in 878ms, sequenceid=73, compaction requested=false 2024-11-23T13:22:04,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2538): Flush status journal for be94e75499ad728566c6b2d31ebaacd4: 2024-11-23T13:22:04,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 
2024-11-23T13:22:04,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-11-23T13:22:04,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4106): Remote procedure done, pid=72 2024-11-23T13:22:04,229 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=72, resume processing ppid=71 2024-11-23T13:22:04,229 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, ppid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2560 sec 2024-11-23T13:22:04,230 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees in 2.2600 sec 2024-11-23T13:22:04,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on be94e75499ad728566c6b2d31ebaacd4 2024-11-23T13:22:04,231 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing be94e75499ad728566c6b2d31ebaacd4 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-23T13:22:04,231 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=A 2024-11-23T13:22:04,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:04,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=B 2024-11-23T13:22:04,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:04,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=C 2024-11-23T13:22:04,232 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:04,236 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/ead6e8e1a3a146f28f904281fd2f2ff8 is 50, key is test_row_0/A:col10/1732368124227/Put/seqid=0 2024-11-23T13:22:04,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742122_1298 (size=12001) 2024-11-23T13:22:04,290 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:04,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368184289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:04,393 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:04,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368184391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:04,595 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:04,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368184594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:04,641 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=87 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/ead6e8e1a3a146f28f904281fd2f2ff8 2024-11-23T13:22:04,650 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/72a22a63a6ce42ae9fecf123a0954adb is 50, key is test_row_0/B:col10/1732368124227/Put/seqid=0 2024-11-23T13:22:04,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742123_1299 (size=12001) 2024-11-23T13:22:04,895 DEBUG [master/ba2e440802a7:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region ee2ee0e805ec7a6fa6f5f67efb41c78f changed from -1.0 to 0.0, refreshing cache 2024-11-23T13:22:04,899 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:04,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368184898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:05,062 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=87 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/72a22a63a6ce42ae9fecf123a0954adb 2024-11-23T13:22:05,070 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/ff6ce2a87aa14b8e8e514fb097b6fa80 is 50, key is test_row_0/C:col10/1732368124227/Put/seqid=0 2024-11-23T13:22:05,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742124_1300 (size=12001) 2024-11-23T13:22:05,160 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:05,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37446 deadline: 1732368185159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:05,161 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:05,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37466 deadline: 1732368185160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:05,161 DEBUG [Thread-1285 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4131 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4., hostname=ba2e440802a7,33173,1732368061317, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T13:22:05,161 DEBUG [Thread-1283 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4132 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4., hostname=ba2e440802a7,33173,1732368061317, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 
K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T13:22:05,170 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:05,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37486 deadline: 1732368185169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:05,170 DEBUG [Thread-1279 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4142 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4., hostname=ba2e440802a7,33173,1732368061317, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T13:22:05,175 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:05,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37458 deadline: 1732368185174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:05,176 DEBUG [Thread-1281 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4145 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4., hostname=ba2e440802a7,33173,1732368061317, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T13:22:05,405 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:05,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368185402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:05,476 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=87 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/ff6ce2a87aa14b8e8e514fb097b6fa80 2024-11-23T13:22:05,482 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/ead6e8e1a3a146f28f904281fd2f2ff8 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/ead6e8e1a3a146f28f904281fd2f2ff8 2024-11-23T13:22:05,486 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/ead6e8e1a3a146f28f904281fd2f2ff8, entries=150, sequenceid=87, filesize=11.7 K 2024-11-23T13:22:05,488 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/72a22a63a6ce42ae9fecf123a0954adb as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/72a22a63a6ce42ae9fecf123a0954adb 2024-11-23T13:22:05,491 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/72a22a63a6ce42ae9fecf123a0954adb, entries=150, sequenceid=87, filesize=11.7 K 2024-11-23T13:22:05,492 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/ff6ce2a87aa14b8e8e514fb097b6fa80 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/ff6ce2a87aa14b8e8e514fb097b6fa80 2024-11-23T13:22:05,496 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/ff6ce2a87aa14b8e8e514fb097b6fa80, entries=150, sequenceid=87, filesize=11.7 K 2024-11-23T13:22:05,496 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for be94e75499ad728566c6b2d31ebaacd4 in 1265ms, sequenceid=87, compaction requested=true 2024-11-23T13:22:05,496 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for be94e75499ad728566c6b2d31ebaacd4: 2024-11-23T13:22:05,497 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store be94e75499ad728566c6b2d31ebaacd4:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T13:22:05,497 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:22:05,497 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:22:05,497 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store be94e75499ad728566c6b2d31ebaacd4:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T13:22:05,497 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:22:05,497 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store be94e75499ad728566c6b2d31ebaacd4:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T13:22:05,497 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:22:05,497 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:22:05,498 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:22:05,498 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): be94e75499ad728566c6b2d31ebaacd4/A is initiating minor 
compaction (all files) 2024-11-23T13:22:05,498 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of be94e75499ad728566c6b2d31ebaacd4/A in TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:05,498 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/0c6d26c9f54a4360908ec1a4634e32d8, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/3218c5e9e44c4c57be11d78d641d4727, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/ead6e8e1a3a146f28f904281fd2f2ff8] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp, totalSize=35.3 K 2024-11-23T13:22:05,498 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:22:05,498 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): be94e75499ad728566c6b2d31ebaacd4/B is initiating minor compaction (all files) 2024-11-23T13:22:05,499 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0c6d26c9f54a4360908ec1a4634e32d8, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732368119908 2024-11-23T13:22:05,499 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of be94e75499ad728566c6b2d31ebaacd4/B in TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 
2024-11-23T13:22:05,499 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/10dc590f52fa45a1943dfdf27c945412, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/6d57b3e763b3462d849a590c25019692, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/72a22a63a6ce42ae9fecf123a0954adb] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp, totalSize=35.3 K 2024-11-23T13:22:05,499 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3218c5e9e44c4c57be11d78d641d4727, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=73, earliestPutTs=1732368122070 2024-11-23T13:22:05,499 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 10dc590f52fa45a1943dfdf27c945412, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732368119908 2024-11-23T13:22:05,499 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting ead6e8e1a3a146f28f904281fd2f2ff8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=87, earliestPutTs=1732368124214 2024-11-23T13:22:05,499 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 6d57b3e763b3462d849a590c25019692, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=73, earliestPutTs=1732368122070 2024-11-23T13:22:05,500 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 72a22a63a6ce42ae9fecf123a0954adb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=87, earliestPutTs=1732368124214 2024-11-23T13:22:05,507 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): be94e75499ad728566c6b2d31ebaacd4#A#compaction#252 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:22:05,508 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/e06402cbb64944c4a6895974b1c40c95 is 50, key is test_row_0/A:col10/1732368124227/Put/seqid=0 2024-11-23T13:22:05,508 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): be94e75499ad728566c6b2d31ebaacd4#B#compaction#251 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:22:05,509 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/4f37750166b5408fb5022486bb641545 is 50, key is test_row_0/B:col10/1732368124227/Put/seqid=0 2024-11-23T13:22:05,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742125_1301 (size=12207) 2024-11-23T13:22:05,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742126_1302 (size=12207) 2024-11-23T13:22:05,540 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/4f37750166b5408fb5022486bb641545 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/4f37750166b5408fb5022486bb641545 2024-11-23T13:22:05,541 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/e06402cbb64944c4a6895974b1c40c95 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/e06402cbb64944c4a6895974b1c40c95 2024-11-23T13:22:05,546 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in be94e75499ad728566c6b2d31ebaacd4/B of be94e75499ad728566c6b2d31ebaacd4 into 4f37750166b5408fb5022486bb641545(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T13:22:05,546 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for be94e75499ad728566c6b2d31ebaacd4: 2024-11-23T13:22:05,546 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4., storeName=be94e75499ad728566c6b2d31ebaacd4/B, priority=13, startTime=1732368125497; duration=0sec 2024-11-23T13:22:05,546 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:22:05,546 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: be94e75499ad728566c6b2d31ebaacd4:B 2024-11-23T13:22:05,546 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:22:05,548 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:22:05,548 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): be94e75499ad728566c6b2d31ebaacd4/C is initiating minor compaction (all files) 2024-11-23T13:22:05,548 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of be94e75499ad728566c6b2d31ebaacd4/C in TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:05,548 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/aae4cea7c977492c97c2f01747b53fd8, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/f619aeaa001341b09f238131e7049a65, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/ff6ce2a87aa14b8e8e514fb097b6fa80] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp, totalSize=35.3 K 2024-11-23T13:22:05,548 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in be94e75499ad728566c6b2d31ebaacd4/A of be94e75499ad728566c6b2d31ebaacd4 into e06402cbb64944c4a6895974b1c40c95(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T13:22:05,548 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for be94e75499ad728566c6b2d31ebaacd4: 2024-11-23T13:22:05,548 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4., storeName=be94e75499ad728566c6b2d31ebaacd4/A, priority=13, startTime=1732368125496; duration=0sec 2024-11-23T13:22:05,548 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:22:05,548 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: be94e75499ad728566c6b2d31ebaacd4:A 2024-11-23T13:22:05,548 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting aae4cea7c977492c97c2f01747b53fd8, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1732368119908 2024-11-23T13:22:05,549 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting f619aeaa001341b09f238131e7049a65, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=73, earliestPutTs=1732368122070 2024-11-23T13:22:05,549 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting ff6ce2a87aa14b8e8e514fb097b6fa80, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=87, earliestPutTs=1732368124214 2024-11-23T13:22:05,559 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): be94e75499ad728566c6b2d31ebaacd4#C#compaction#253 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:22:05,560 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/80dab130c03e4abab8d3f5c5131efacd is 50, key is test_row_0/C:col10/1732368124227/Put/seqid=0 2024-11-23T13:22:05,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742127_1303 (size=12207) 2024-11-23T13:22:05,578 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/80dab130c03e4abab8d3f5c5131efacd as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/80dab130c03e4abab8d3f5c5131efacd 2024-11-23T13:22:05,584 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in be94e75499ad728566c6b2d31ebaacd4/C of be94e75499ad728566c6b2d31ebaacd4 into 80dab130c03e4abab8d3f5c5131efacd(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T13:22:05,584 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for be94e75499ad728566c6b2d31ebaacd4: 2024-11-23T13:22:05,584 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4., storeName=be94e75499ad728566c6b2d31ebaacd4/C, priority=13, startTime=1732368125497; duration=0sec 2024-11-23T13:22:05,584 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:22:05,584 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: be94e75499ad728566c6b2d31ebaacd4:C 2024-11-23T13:22:06,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-23T13:22:06,077 INFO [Thread-1287 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 71 completed 2024-11-23T13:22:06,079 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T13:22:06,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees 2024-11-23T13:22:06,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-23T13:22:06,081 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T13:22:06,081 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T13:22:06,082 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=74, ppid=73, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T13:22:06,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-23T13:22:06,233 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:06,233 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-23T13:22:06,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 
2024-11-23T13:22:06,234 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2837): Flushing be94e75499ad728566c6b2d31ebaacd4 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-23T13:22:06,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=A 2024-11-23T13:22:06,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:06,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=B 2024-11-23T13:22:06,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:06,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=C 2024-11-23T13:22:06,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:06,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/2caec71ba84a46ce97767bd925a708d2 is 50, key is test_row_0/A:col10/1732368124283/Put/seqid=0 2024-11-23T13:22:06,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742128_1304 (size=12001) 2024-11-23T13:22:06,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-23T13:22:06,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on be94e75499ad728566c6b2d31ebaacd4 2024-11-23T13:22:06,415 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. as already flushing 2024-11-23T13:22:06,430 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:06,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368186429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:06,533 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:06,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368186531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:06,644 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/2caec71ba84a46ce97767bd925a708d2 2024-11-23T13:22:06,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/a5d8f10c735e4aca82be49fa2f2afddb is 50, key is test_row_0/B:col10/1732368124283/Put/seqid=0 2024-11-23T13:22:06,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742129_1305 (size=12001) 2024-11-23T13:22:06,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-23T13:22:06,735 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:06,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368186734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:07,037 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:07,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368187037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:07,060 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/a5d8f10c735e4aca82be49fa2f2afddb 2024-11-23T13:22:07,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/650bb67cf0c3409dbaf423231f08123f is 50, key is test_row_0/C:col10/1732368124283/Put/seqid=0 2024-11-23T13:22:07,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742130_1306 (size=12001) 2024-11-23T13:22:07,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-23T13:22:07,474 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/650bb67cf0c3409dbaf423231f08123f 2024-11-23T13:22:07,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/2caec71ba84a46ce97767bd925a708d2 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/2caec71ba84a46ce97767bd925a708d2 2024-11-23T13:22:07,483 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/2caec71ba84a46ce97767bd925a708d2, entries=150, sequenceid=115, filesize=11.7 K 2024-11-23T13:22:07,484 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/a5d8f10c735e4aca82be49fa2f2afddb as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/a5d8f10c735e4aca82be49fa2f2afddb 2024-11-23T13:22:07,488 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/a5d8f10c735e4aca82be49fa2f2afddb, entries=150, sequenceid=115, filesize=11.7 K 2024-11-23T13:22:07,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/650bb67cf0c3409dbaf423231f08123f as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/650bb67cf0c3409dbaf423231f08123f 2024-11-23T13:22:07,506 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/650bb67cf0c3409dbaf423231f08123f, entries=150, sequenceid=115, filesize=11.7 K 2024-11-23T13:22:07,507 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for be94e75499ad728566c6b2d31ebaacd4 in 1273ms, sequenceid=115, compaction requested=false 2024-11-23T13:22:07,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2538): Flush status journal for be94e75499ad728566c6b2d31ebaacd4: 2024-11-23T13:22:07,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 
2024-11-23T13:22:07,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=74 2024-11-23T13:22:07,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4106): Remote procedure done, pid=74 2024-11-23T13:22:07,509 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=74, resume processing ppid=73 2024-11-23T13:22:07,510 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, ppid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4270 sec 2024-11-23T13:22:07,511 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees in 1.4310 sec 2024-11-23T13:22:07,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on be94e75499ad728566c6b2d31ebaacd4 2024-11-23T13:22:07,541 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing be94e75499ad728566c6b2d31ebaacd4 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-23T13:22:07,541 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=A 2024-11-23T13:22:07,542 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:07,542 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=B 2024-11-23T13:22:07,542 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:07,542 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=C 2024-11-23T13:22:07,542 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:07,546 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/71645ef0676a416c807bfaec80e5a4c4 is 50, key is test_row_0/A:col10/1732368126422/Put/seqid=0 2024-11-23T13:22:07,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742131_1307 (size=12001) 2024-11-23T13:22:07,594 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:07,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368187593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:07,696 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:07,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368187696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:07,899 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:07,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368187898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:07,952 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/71645ef0676a416c807bfaec80e5a4c4 2024-11-23T13:22:07,960 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/02f06ad4ce6e46dd870bc7dc19a8c2b7 is 50, key is test_row_0/B:col10/1732368126422/Put/seqid=0 2024-11-23T13:22:07,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742132_1308 (size=12001) 2024-11-23T13:22:07,965 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/02f06ad4ce6e46dd870bc7dc19a8c2b7 2024-11-23T13:22:07,972 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/9c859e43158b4f92a1dc942557defb95 is 50, key is test_row_0/C:col10/1732368126422/Put/seqid=0 2024-11-23T13:22:07,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742133_1309 (size=12001) 2024-11-23T13:22:08,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-23T13:22:08,185 INFO [Thread-1287 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 73 completed 2024-11-23T13:22:08,186 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T13:22:08,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees 2024-11-23T13:22:08,188 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T13:22:08,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-23T13:22:08,189 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T13:22:08,189 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T13:22:08,201 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:08,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368188200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:08,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-23T13:22:08,340 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:08,341 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-23T13:22:08,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:08,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. as already flushing 2024-11-23T13:22:08,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:08,341 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:22:08,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:08,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:08,378 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/9c859e43158b4f92a1dc942557defb95 2024-11-23T13:22:08,383 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/71645ef0676a416c807bfaec80e5a4c4 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/71645ef0676a416c807bfaec80e5a4c4 2024-11-23T13:22:08,386 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/71645ef0676a416c807bfaec80e5a4c4, entries=150, sequenceid=127, filesize=11.7 K 2024-11-23T13:22:08,387 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/02f06ad4ce6e46dd870bc7dc19a8c2b7 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/02f06ad4ce6e46dd870bc7dc19a8c2b7 2024-11-23T13:22:08,391 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/02f06ad4ce6e46dd870bc7dc19a8c2b7, entries=150, sequenceid=127, filesize=11.7 K 2024-11-23T13:22:08,391 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/9c859e43158b4f92a1dc942557defb95 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/9c859e43158b4f92a1dc942557defb95 2024-11-23T13:22:08,395 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/9c859e43158b4f92a1dc942557defb95, entries=150, sequenceid=127, filesize=11.7 K 2024-11-23T13:22:08,398 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for be94e75499ad728566c6b2d31ebaacd4 in 857ms, sequenceid=127, compaction requested=true 2024-11-23T13:22:08,398 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for be94e75499ad728566c6b2d31ebaacd4: 2024-11-23T13:22:08,398 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
be94e75499ad728566c6b2d31ebaacd4:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T13:22:08,398 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:22:08,398 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store be94e75499ad728566c6b2d31ebaacd4:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T13:22:08,398 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:22:08,398 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store be94e75499ad728566c6b2d31ebaacd4:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T13:22:08,398 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:22:08,398 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:22:08,398 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:22:08,399 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36209 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:22:08,399 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): be94e75499ad728566c6b2d31ebaacd4/A is initiating minor compaction (all files) 2024-11-23T13:22:08,399 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36209 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:22:08,399 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of be94e75499ad728566c6b2d31ebaacd4/A in TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:08,399 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): be94e75499ad728566c6b2d31ebaacd4/B is initiating minor compaction (all files) 2024-11-23T13:22:08,399 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of be94e75499ad728566c6b2d31ebaacd4/B in TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 
2024-11-23T13:22:08,399 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/e06402cbb64944c4a6895974b1c40c95, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/2caec71ba84a46ce97767bd925a708d2, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/71645ef0676a416c807bfaec80e5a4c4] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp, totalSize=35.4 K 2024-11-23T13:22:08,399 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/4f37750166b5408fb5022486bb641545, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/a5d8f10c735e4aca82be49fa2f2afddb, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/02f06ad4ce6e46dd870bc7dc19a8c2b7] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp, totalSize=35.4 K 2024-11-23T13:22:08,400 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting e06402cbb64944c4a6895974b1c40c95, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=87, earliestPutTs=1732368124214 2024-11-23T13:22:08,400 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 4f37750166b5408fb5022486bb641545, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=87, earliestPutTs=1732368124214 2024-11-23T13:22:08,401 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2caec71ba84a46ce97767bd925a708d2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1732368124283 2024-11-23T13:22:08,401 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting a5d8f10c735e4aca82be49fa2f2afddb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1732368124283 2024-11-23T13:22:08,401 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 71645ef0676a416c807bfaec80e5a4c4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1732368126422 2024-11-23T13:22:08,401 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 02f06ad4ce6e46dd870bc7dc19a8c2b7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1732368126422 2024-11-23T13:22:08,410 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): be94e75499ad728566c6b2d31ebaacd4#A#compaction#260 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:22:08,410 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): be94e75499ad728566c6b2d31ebaacd4#B#compaction#261 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:22:08,411 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/1016c5772a544bf4b69dc910accbd480 is 50, key is test_row_0/B:col10/1732368126422/Put/seqid=0 2024-11-23T13:22:08,411 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/f84ed36ef4b647cfa5b2b828d0392019 is 50, key is test_row_0/A:col10/1732368126422/Put/seqid=0 2024-11-23T13:22:08,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742134_1310 (size=12309) 2024-11-23T13:22:08,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742135_1311 (size=12309) 2024-11-23T13:22:08,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-23T13:22:08,493 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:08,494 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-23T13:22:08,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 
2024-11-23T13:22:08,494 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2837): Flushing be94e75499ad728566c6b2d31ebaacd4 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-23T13:22:08,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=A 2024-11-23T13:22:08,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:08,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=B 2024-11-23T13:22:08,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:08,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=C 2024-11-23T13:22:08,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:08,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/1c7095810b9244f89da94d044b26ff7b is 50, key is test_row_0/A:col10/1732368127590/Put/seqid=0 2024-11-23T13:22:08,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742136_1312 (size=12151) 2024-11-23T13:22:08,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on be94e75499ad728566c6b2d31ebaacd4 2024-11-23T13:22:08,704 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. as already flushing 2024-11-23T13:22:08,727 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:08,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368188725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:08,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-23T13:22:08,821 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/1016c5772a544bf4b69dc910accbd480 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/1016c5772a544bf4b69dc910accbd480 2024-11-23T13:22:08,822 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/f84ed36ef4b647cfa5b2b828d0392019 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/f84ed36ef4b647cfa5b2b828d0392019 2024-11-23T13:22:08,828 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in be94e75499ad728566c6b2d31ebaacd4/A of be94e75499ad728566c6b2d31ebaacd4 into f84ed36ef4b647cfa5b2b828d0392019(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:22:08,828 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for be94e75499ad728566c6b2d31ebaacd4: 2024-11-23T13:22:08,828 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in be94e75499ad728566c6b2d31ebaacd4/B of be94e75499ad728566c6b2d31ebaacd4 into 1016c5772a544bf4b69dc910accbd480(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T13:22:08,828 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for be94e75499ad728566c6b2d31ebaacd4: 2024-11-23T13:22:08,828 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4., storeName=be94e75499ad728566c6b2d31ebaacd4/B, priority=13, startTime=1732368128398; duration=0sec 2024-11-23T13:22:08,828 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:22:08,828 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: be94e75499ad728566c6b2d31ebaacd4:B 2024-11-23T13:22:08,828 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:22:08,829 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4., storeName=be94e75499ad728566c6b2d31ebaacd4/A, priority=13, startTime=1732368128398; duration=0sec 2024-11-23T13:22:08,829 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:22:08,829 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: be94e75499ad728566c6b2d31ebaacd4:A 2024-11-23T13:22:08,829 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36209 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:22:08,830 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): be94e75499ad728566c6b2d31ebaacd4/C is initiating minor compaction (all files) 2024-11-23T13:22:08,830 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of be94e75499ad728566c6b2d31ebaacd4/C in TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 
2024-11-23T13:22:08,830 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/80dab130c03e4abab8d3f5c5131efacd, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/650bb67cf0c3409dbaf423231f08123f, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/9c859e43158b4f92a1dc942557defb95] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp, totalSize=35.4 K 2024-11-23T13:22:08,830 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 80dab130c03e4abab8d3f5c5131efacd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=87, earliestPutTs=1732368124214 2024-11-23T13:22:08,830 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 650bb67cf0c3409dbaf423231f08123f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1732368124283 2024-11-23T13:22:08,831 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 9c859e43158b4f92a1dc942557defb95, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1732368126422 2024-11-23T13:22:08,832 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:08,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368188829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:08,839 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): be94e75499ad728566c6b2d31ebaacd4#C#compaction#263 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:22:08,839 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/e6e7df10a6324aa2bbc20e076375dee1 is 50, key is test_row_0/C:col10/1732368126422/Put/seqid=0 2024-11-23T13:22:08,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742137_1313 (size=12309) 2024-11-23T13:22:08,850 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/e6e7df10a6324aa2bbc20e076375dee1 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/e6e7df10a6324aa2bbc20e076375dee1 2024-11-23T13:22:08,855 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in be94e75499ad728566c6b2d31ebaacd4/C of be94e75499ad728566c6b2d31ebaacd4 into e6e7df10a6324aa2bbc20e076375dee1(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T13:22:08,855 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for be94e75499ad728566c6b2d31ebaacd4: 2024-11-23T13:22:08,855 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4., storeName=be94e75499ad728566c6b2d31ebaacd4/C, priority=13, startTime=1732368128398; duration=0sec 2024-11-23T13:22:08,855 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:22:08,855 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: be94e75499ad728566c6b2d31ebaacd4:C 2024-11-23T13:22:08,904 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=151 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/1c7095810b9244f89da94d044b26ff7b 2024-11-23T13:22:08,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/d78ea92880e3449c96438bbfa5007e9b is 50, key is test_row_0/B:col10/1732368127590/Put/seqid=0 2024-11-23T13:22:08,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742138_1314 (size=12151) 2024-11-23T13:22:08,919 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=151 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/d78ea92880e3449c96438bbfa5007e9b 2024-11-23T13:22:08,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/b533eeaa89e9449093a1eaad85384513 is 50, key is test_row_0/C:col10/1732368127590/Put/seqid=0 2024-11-23T13:22:08,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742139_1315 (size=12151) 2024-11-23T13:22:09,035 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:09,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368189034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:09,173 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:09,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37446 deadline: 1732368189173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:09,174 DEBUG [Thread-1285 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8144 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4., hostname=ba2e440802a7,33173,1732368061317, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T13:22:09,176 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:09,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37486 deadline: 1732368189175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:09,177 DEBUG [Thread-1279 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8149 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4., hostname=ba2e440802a7,33173,1732368061317, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T13:22:09,186 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:09,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37466 deadline: 1732368189186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:09,187 DEBUG [Thread-1283 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8158 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4., hostname=ba2e440802a7,33173,1732368061317, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T13:22:09,202 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:09,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37458 deadline: 1732368189201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:09,202 DEBUG [Thread-1281 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8171 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4., hostname=ba2e440802a7,33173,1732368061317, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T13:22:09,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-23T13:22:09,335 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=151 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/b533eeaa89e9449093a1eaad85384513 2024-11-23T13:22:09,336 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:09,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368189336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:09,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/1c7095810b9244f89da94d044b26ff7b as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/1c7095810b9244f89da94d044b26ff7b 2024-11-23T13:22:09,345 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/1c7095810b9244f89da94d044b26ff7b, entries=150, sequenceid=151, filesize=11.9 K 2024-11-23T13:22:09,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/d78ea92880e3449c96438bbfa5007e9b as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/d78ea92880e3449c96438bbfa5007e9b 2024-11-23T13:22:09,349 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/d78ea92880e3449c96438bbfa5007e9b, entries=150, sequenceid=151, filesize=11.9 K 2024-11-23T13:22:09,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/b533eeaa89e9449093a1eaad85384513 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/b533eeaa89e9449093a1eaad85384513 2024-11-23T13:22:09,356 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/b533eeaa89e9449093a1eaad85384513, entries=150, sequenceid=151, filesize=11.9 K 2024-11-23T13:22:09,357 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for be94e75499ad728566c6b2d31ebaacd4 in 863ms, sequenceid=151, compaction requested=false 2024-11-23T13:22:09,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2538): Flush status journal for be94e75499ad728566c6b2d31ebaacd4: 2024-11-23T13:22:09,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:09,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=76 2024-11-23T13:22:09,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4106): Remote procedure done, pid=76 2024-11-23T13:22:09,360 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=76, resume processing ppid=75 2024-11-23T13:22:09,360 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1690 sec 2024-11-23T13:22:09,361 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees in 1.1740 sec 2024-11-23T13:22:09,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on be94e75499ad728566c6b2d31ebaacd4 2024-11-23T13:22:09,841 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing be94e75499ad728566c6b2d31ebaacd4 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-23T13:22:09,841 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=A 2024-11-23T13:22:09,841 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:09,841 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=B 2024-11-23T13:22:09,841 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:09,841 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=C 2024-11-23T13:22:09,842 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:09,846 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/3dfb4e310e764b1289fdd6b3f3b17aaa is 50, key is test_row_1/A:col10/1732368129840/Put/seqid=0 2024-11-23T13:22:09,867 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742140_1316 (size=9757) 2024-11-23T13:22:09,889 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:09,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368189888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:09,992 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:09,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368189991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:10,194 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:10,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368190193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:10,268 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=167 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/3dfb4e310e764b1289fdd6b3f3b17aaa 2024-11-23T13:22:10,276 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/199d181a04c848e38e00fef04f5994e6 is 50, key is test_row_1/B:col10/1732368129840/Put/seqid=0 2024-11-23T13:22:10,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742141_1317 (size=9757) 2024-11-23T13:22:10,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-23T13:22:10,292 INFO [Thread-1287 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 75 completed 2024-11-23T13:22:10,293 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T13:22:10,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees 2024-11-23T13:22:10,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-23T13:22:10,295 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T13:22:10,296 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T13:22:10,296 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=78, ppid=77, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 
2024-11-23T13:22:10,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-23T13:22:10,448 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:10,448 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-23T13:22:10,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:10,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. as already flushing 2024-11-23T13:22:10,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:10,449 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:10,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:10,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:10,497 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:10,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368190495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:10,601 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:10,601 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-23T13:22:10,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:10,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. as already flushing 2024-11-23T13:22:10,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:10,602 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:22:10,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:10,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:10,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-23T13:22:10,682 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=167 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/199d181a04c848e38e00fef04f5994e6 2024-11-23T13:22:10,689 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/4767b8f0a6ab4123bcce5303ba5d020c is 50, key is test_row_1/C:col10/1732368129840/Put/seqid=0 2024-11-23T13:22:10,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742142_1318 (size=9757) 2024-11-23T13:22:10,754 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:10,754 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-23T13:22:10,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:10,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. as already flushing 2024-11-23T13:22:10,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:10,755 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:10,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:10,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:10,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-23T13:22:10,907 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:10,907 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-23T13:22:10,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:10,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. as already flushing 2024-11-23T13:22:10,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:10,908 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:10,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:10,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:11,003 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:11,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368191002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:11,060 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:11,060 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-23T13:22:11,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:11,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. as already flushing 2024-11-23T13:22:11,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:11,061 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:11,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:11,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:11,094 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=167 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/4767b8f0a6ab4123bcce5303ba5d020c 2024-11-23T13:22:11,099 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/3dfb4e310e764b1289fdd6b3f3b17aaa as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/3dfb4e310e764b1289fdd6b3f3b17aaa 2024-11-23T13:22:11,103 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/3dfb4e310e764b1289fdd6b3f3b17aaa, entries=100, sequenceid=167, filesize=9.5 K 2024-11-23T13:22:11,104 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/199d181a04c848e38e00fef04f5994e6 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/199d181a04c848e38e00fef04f5994e6 2024-11-23T13:22:11,108 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/199d181a04c848e38e00fef04f5994e6, entries=100, 
sequenceid=167, filesize=9.5 K 2024-11-23T13:22:11,109 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/4767b8f0a6ab4123bcce5303ba5d020c as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/4767b8f0a6ab4123bcce5303ba5d020c 2024-11-23T13:22:11,113 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/4767b8f0a6ab4123bcce5303ba5d020c, entries=100, sequenceid=167, filesize=9.5 K 2024-11-23T13:22:11,114 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for be94e75499ad728566c6b2d31ebaacd4 in 1273ms, sequenceid=167, compaction requested=true 2024-11-23T13:22:11,114 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for be94e75499ad728566c6b2d31ebaacd4: 2024-11-23T13:22:11,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store be94e75499ad728566c6b2d31ebaacd4:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T13:22:11,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:22:11,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store be94e75499ad728566c6b2d31ebaacd4:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T13:22:11,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:22:11,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store be94e75499ad728566c6b2d31ebaacd4:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T13:22:11,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:22:11,114 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:22:11,114 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:22:11,115 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34217 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:22:11,115 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34217 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:22:11,115 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 
be94e75499ad728566c6b2d31ebaacd4/B is initiating minor compaction (all files) 2024-11-23T13:22:11,115 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): be94e75499ad728566c6b2d31ebaacd4/A is initiating minor compaction (all files) 2024-11-23T13:22:11,115 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of be94e75499ad728566c6b2d31ebaacd4/A in TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:11,115 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of be94e75499ad728566c6b2d31ebaacd4/B in TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:11,115 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/f84ed36ef4b647cfa5b2b828d0392019, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/1c7095810b9244f89da94d044b26ff7b, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/3dfb4e310e764b1289fdd6b3f3b17aaa] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp, totalSize=33.4 K 2024-11-23T13:22:11,115 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/1016c5772a544bf4b69dc910accbd480, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/d78ea92880e3449c96438bbfa5007e9b, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/199d181a04c848e38e00fef04f5994e6] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp, totalSize=33.4 K 2024-11-23T13:22:11,116 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 1016c5772a544bf4b69dc910accbd480, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1732368126422 2024-11-23T13:22:11,116 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting f84ed36ef4b647cfa5b2b828d0392019, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1732368126422 2024-11-23T13:22:11,117 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1c7095810b9244f89da94d044b26ff7b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=151, earliestPutTs=1732368127570 2024-11-23T13:22:11,117 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting d78ea92880e3449c96438bbfa5007e9b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=151, earliestPutTs=1732368127570 2024-11-23T13:22:11,117 DEBUG 
[RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3dfb4e310e764b1289fdd6b3f3b17aaa, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1732368128724 2024-11-23T13:22:11,117 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 199d181a04c848e38e00fef04f5994e6, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1732368128724 2024-11-23T13:22:11,130 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): be94e75499ad728566c6b2d31ebaacd4#B#compaction#269 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:22:11,131 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/3f35fa450ffb443b83effe8a81c911a6 is 50, key is test_row_0/B:col10/1732368127590/Put/seqid=0 2024-11-23T13:22:11,131 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): be94e75499ad728566c6b2d31ebaacd4#A#compaction#270 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:22:11,132 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/e6b8a1559d0b4f2a8705e18e5b6bb105 is 50, key is test_row_0/A:col10/1732368127590/Put/seqid=0 2024-11-23T13:22:11,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742143_1319 (size=12561) 2024-11-23T13:22:11,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742144_1320 (size=12561) 2024-11-23T13:22:11,155 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/e6b8a1559d0b4f2a8705e18e5b6bb105 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/e6b8a1559d0b4f2a8705e18e5b6bb105 2024-11-23T13:22:11,161 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/3f35fa450ffb443b83effe8a81c911a6 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/3f35fa450ffb443b83effe8a81c911a6 2024-11-23T13:22:11,162 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in be94e75499ad728566c6b2d31ebaacd4/A of be94e75499ad728566c6b2d31ebaacd4 into e6b8a1559d0b4f2a8705e18e5b6bb105(size=12.3 K), total size for store is 12.3 
K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:22:11,162 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for be94e75499ad728566c6b2d31ebaacd4: 2024-11-23T13:22:11,162 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4., storeName=be94e75499ad728566c6b2d31ebaacd4/A, priority=13, startTime=1732368131114; duration=0sec 2024-11-23T13:22:11,163 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:22:11,163 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: be94e75499ad728566c6b2d31ebaacd4:A 2024-11-23T13:22:11,163 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:22:11,164 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34217 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:22:11,164 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): be94e75499ad728566c6b2d31ebaacd4/C is initiating minor compaction (all files) 2024-11-23T13:22:11,164 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of be94e75499ad728566c6b2d31ebaacd4/C in TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 
2024-11-23T13:22:11,164 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/e6e7df10a6324aa2bbc20e076375dee1, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/b533eeaa89e9449093a1eaad85384513, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/4767b8f0a6ab4123bcce5303ba5d020c] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp, totalSize=33.4 K 2024-11-23T13:22:11,165 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting e6e7df10a6324aa2bbc20e076375dee1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1732368126422 2024-11-23T13:22:11,166 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting b533eeaa89e9449093a1eaad85384513, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=151, earliestPutTs=1732368127570 2024-11-23T13:22:11,166 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4767b8f0a6ab4123bcce5303ba5d020c, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1732368128724 2024-11-23T13:22:11,168 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in be94e75499ad728566c6b2d31ebaacd4/B of be94e75499ad728566c6b2d31ebaacd4 into 3f35fa450ffb443b83effe8a81c911a6(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:22:11,168 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for be94e75499ad728566c6b2d31ebaacd4: 2024-11-23T13:22:11,168 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4., storeName=be94e75499ad728566c6b2d31ebaacd4/B, priority=13, startTime=1732368131114; duration=0sec 2024-11-23T13:22:11,168 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:22:11,168 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: be94e75499ad728566c6b2d31ebaacd4:B 2024-11-23T13:22:11,174 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): be94e75499ad728566c6b2d31ebaacd4#C#compaction#271 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:22:11,174 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/bc26ecba3e3c4c45b01c289b8dc5f9fa is 50, key is test_row_0/C:col10/1732368127590/Put/seqid=0 2024-11-23T13:22:11,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742145_1321 (size=12561) 2024-11-23T13:22:11,186 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/bc26ecba3e3c4c45b01c289b8dc5f9fa as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/bc26ecba3e3c4c45b01c289b8dc5f9fa 2024-11-23T13:22:11,190 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in be94e75499ad728566c6b2d31ebaacd4/C of be94e75499ad728566c6b2d31ebaacd4 into bc26ecba3e3c4c45b01c289b8dc5f9fa(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:22:11,191 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for be94e75499ad728566c6b2d31ebaacd4: 2024-11-23T13:22:11,191 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4., storeName=be94e75499ad728566c6b2d31ebaacd4/C, priority=13, startTime=1732368131114; duration=0sec 2024-11-23T13:22:11,191 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:22:11,191 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: be94e75499ad728566c6b2d31ebaacd4:C 2024-11-23T13:22:11,213 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:11,213 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-23T13:22:11,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 
2024-11-23T13:22:11,214 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2837): Flushing be94e75499ad728566c6b2d31ebaacd4 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-23T13:22:11,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=A 2024-11-23T13:22:11,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:11,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=B 2024-11-23T13:22:11,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:11,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=C 2024-11-23T13:22:11,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:11,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/fd884ff1523c417398fcf0ca5a781cf1 is 50, key is test_row_0/A:col10/1732368129878/Put/seqid=0 2024-11-23T13:22:11,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742146_1322 (size=12151) 2024-11-23T13:22:11,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-23T13:22:11,626 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=193 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/fd884ff1523c417398fcf0ca5a781cf1 2024-11-23T13:22:11,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/36aeadafa94e47c9bb39e24ba10a37db is 50, key is test_row_0/B:col10/1732368129878/Put/seqid=0 2024-11-23T13:22:11,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742147_1323 (size=12151) 2024-11-23T13:22:11,643 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=193 (bloomFilter=true), 
to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/36aeadafa94e47c9bb39e24ba10a37db 2024-11-23T13:22:11,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/72e8b5f7ee7c496f98524708def9a9ac is 50, key is test_row_0/C:col10/1732368129878/Put/seqid=0 2024-11-23T13:22:11,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742148_1324 (size=12151) 2024-11-23T13:22:11,655 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=193 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/72e8b5f7ee7c496f98524708def9a9ac 2024-11-23T13:22:11,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/fd884ff1523c417398fcf0ca5a781cf1 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/fd884ff1523c417398fcf0ca5a781cf1 2024-11-23T13:22:11,663 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/fd884ff1523c417398fcf0ca5a781cf1, entries=150, sequenceid=193, filesize=11.9 K 2024-11-23T13:22:11,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/36aeadafa94e47c9bb39e24ba10a37db as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/36aeadafa94e47c9bb39e24ba10a37db 2024-11-23T13:22:11,668 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/36aeadafa94e47c9bb39e24ba10a37db, entries=150, sequenceid=193, filesize=11.9 K 2024-11-23T13:22:11,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/72e8b5f7ee7c496f98524708def9a9ac as 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/72e8b5f7ee7c496f98524708def9a9ac 2024-11-23T13:22:11,673 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/72e8b5f7ee7c496f98524708def9a9ac, entries=150, sequenceid=193, filesize=11.9 K 2024-11-23T13:22:11,675 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=0 B/0 for be94e75499ad728566c6b2d31ebaacd4 in 460ms, sequenceid=193, compaction requested=false 2024-11-23T13:22:11,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2538): Flush status journal for be94e75499ad728566c6b2d31ebaacd4: 2024-11-23T13:22:11,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:11,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=78 2024-11-23T13:22:11,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4106): Remote procedure done, pid=78 2024-11-23T13:22:11,677 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=78, resume processing ppid=77 2024-11-23T13:22:11,677 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, ppid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3800 sec 2024-11-23T13:22:11,679 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees in 1.3850 sec 2024-11-23T13:22:12,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on be94e75499ad728566c6b2d31ebaacd4 2024-11-23T13:22:12,025 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing be94e75499ad728566c6b2d31ebaacd4 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-23T13:22:12,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=A 2024-11-23T13:22:12,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:12,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=B 2024-11-23T13:22:12,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:12,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=C 2024-11-23T13:22:12,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:12,030 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/e908d16326b145dba65fb7a7b1f16afc is 50, key is test_row_0/A:col10/1732368132024/Put/seqid=0 2024-11-23T13:22:12,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742149_1325 (size=12147) 2024-11-23T13:22:12,076 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:12,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368192075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:12,178 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:12,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368192177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:12,381 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:12,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368192380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:12,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-23T13:22:12,408 INFO [Thread-1287 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 77 completed 2024-11-23T13:22:12,409 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T13:22:12,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees 2024-11-23T13:22:12,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-23T13:22:12,411 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T13:22:12,411 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T13:22:12,411 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T13:22:12,435 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=204 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/e908d16326b145dba65fb7a7b1f16afc 2024-11-23T13:22:12,442 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/6c70235cdbd143c6baf097c3f85ada14 is 50, key is test_row_0/B:col10/1732368132024/Put/seqid=0 2024-11-23T13:22:12,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742150_1326 (size=9757) 
2024-11-23T13:22:12,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-23T13:22:12,563 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:12,563 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-23T13:22:12,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:12,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. as already flushing 2024-11-23T13:22:12,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:12,564 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:12,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:12,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:12,684 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:12,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368192682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:12,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-23T13:22:12,716 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:12,716 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-23T13:22:12,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:12,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. as already flushing 2024-11-23T13:22:12,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:12,717 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
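Editor's note: RegionTooBusyException is retryable server-side pushback; the HBase client retries the mutation with backoff and only surfaces a failure once its retry budget runs out, which is why the writer threads in this test keep going despite the WARN entries above. A minimal writer sketch under that assumption; the row, family, and qualifier mirror the keys in the log, while the cell value and retry settings are placeholders.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.client.retries.number", 15); // example retry budget
    conf.setLong("hbase.client.pause", 100L);       // example base backoff in ms
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      try {
        // Retried internally with backoff while the region answers RegionTooBusyException.
        table.put(put);
      } catch (IOException e) {
        // Surfaces only after the retry budget is exhausted; back off and try again later.
        System.err.println("Region still too busy: " + e.getMessage());
      }
    }
  }
}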
2024-11-23T13:22:12,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:12,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:12,846 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=204 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/6c70235cdbd143c6baf097c3f85ada14 2024-11-23T13:22:12,853 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/5d571b3d022d40f7bb216a7670e9cee8 is 50, key is test_row_0/C:col10/1732368132024/Put/seqid=0 2024-11-23T13:22:12,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742151_1327 (size=9757) 2024-11-23T13:22:12,869 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:12,869 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-23T13:22:12,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:12,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. as already flushing 2024-11-23T13:22:12,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:12,870 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:22:12,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:12,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:13,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-23T13:22:13,022 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:13,022 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-23T13:22:13,022 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:13,022 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. as already flushing 2024-11-23T13:22:13,022 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:13,022 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:13,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:13,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:22:13,174 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:13,175 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-23T13:22:13,175 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:13,175 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. as already flushing 2024-11-23T13:22:13,175 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:13,175 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:13,175 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:22:13,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:13,189 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:13,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368193188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:13,258 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=204 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/5d571b3d022d40f7bb216a7670e9cee8 2024-11-23T13:22:13,262 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/e908d16326b145dba65fb7a7b1f16afc as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/e908d16326b145dba65fb7a7b1f16afc 2024-11-23T13:22:13,266 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/e908d16326b145dba65fb7a7b1f16afc, entries=150, sequenceid=204, filesize=11.9 K 2024-11-23T13:22:13,267 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/6c70235cdbd143c6baf097c3f85ada14 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/6c70235cdbd143c6baf097c3f85ada14 2024-11-23T13:22:13,271 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/6c70235cdbd143c6baf097c3f85ada14, entries=100, sequenceid=204, filesize=9.5 K 2024-11-23T13:22:13,272 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/5d571b3d022d40f7bb216a7670e9cee8 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/5d571b3d022d40f7bb216a7670e9cee8 2024-11-23T13:22:13,276 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/5d571b3d022d40f7bb216a7670e9cee8, entries=100, sequenceid=204, filesize=9.5 K 2024-11-23T13:22:13,277 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for be94e75499ad728566c6b2d31ebaacd4 in 1252ms, sequenceid=204, compaction requested=true 2024-11-23T13:22:13,277 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for be94e75499ad728566c6b2d31ebaacd4: 2024-11-23T13:22:13,277 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store be94e75499ad728566c6b2d31ebaacd4:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T13:22:13,277 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:22:13,277 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:22:13,277 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store be94e75499ad728566c6b2d31ebaacd4:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T13:22:13,277 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:22:13,277 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store be94e75499ad728566c6b2d31ebaacd4:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T13:22:13,277 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:22:13,277 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:22:13,278 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36859 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:22:13,278 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): be94e75499ad728566c6b2d31ebaacd4/A is initiating minor compaction (all files) 2024-11-23T13:22:13,278 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of be94e75499ad728566c6b2d31ebaacd4/A in TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 
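Editor's note: the compaction requests above fire because each flush left the A, B, and C stores with three HFiles, which meets the usual minimum for a minor compaction, and ExploringCompactionPolicy then selects all three. A hedged sketch of the knobs involved, shown with the stock defaults for illustration rather than this test's settings:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuning {
  static Configuration compactionConf() {
    Configuration conf = HBaseConfiguration.create();
    // Minimum number of store files before a minor compaction is considered
    // (3 is the stock default, which is why the 3-file stores above get selected).
    conf.setInt("hbase.hstore.compaction.min", 3);
    // Upper bound on how many files a single minor compaction may pick.
    conf.setInt("hbase.hstore.compaction.max", 10);
    return conf;
  }
}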
2024-11-23T13:22:13,278 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/e6b8a1559d0b4f2a8705e18e5b6bb105, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/fd884ff1523c417398fcf0ca5a781cf1, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/e908d16326b145dba65fb7a7b1f16afc] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp, totalSize=36.0 K 2024-11-23T13:22:13,279 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34469 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:22:13,279 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting e6b8a1559d0b4f2a8705e18e5b6bb105, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1732368127590 2024-11-23T13:22:13,279 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): be94e75499ad728566c6b2d31ebaacd4/B is initiating minor compaction (all files) 2024-11-23T13:22:13,279 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of be94e75499ad728566c6b2d31ebaacd4/B in TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 
2024-11-23T13:22:13,279 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/3f35fa450ffb443b83effe8a81c911a6, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/36aeadafa94e47c9bb39e24ba10a37db, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/6c70235cdbd143c6baf097c3f85ada14] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp, totalSize=33.7 K 2024-11-23T13:22:13,279 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting fd884ff1523c417398fcf0ca5a781cf1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1732368129878 2024-11-23T13:22:13,279 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 3f35fa450ffb443b83effe8a81c911a6, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1732368127590 2024-11-23T13:22:13,279 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting e908d16326b145dba65fb7a7b1f16afc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1732368132012 2024-11-23T13:22:13,280 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 36aeadafa94e47c9bb39e24ba10a37db, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1732368129878 2024-11-23T13:22:13,280 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 6c70235cdbd143c6baf097c3f85ada14, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1732368132012 2024-11-23T13:22:13,287 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): be94e75499ad728566c6b2d31ebaacd4#A#compaction#278 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:22:13,288 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/d99c51be248941b09be4c4df7a80f287 is 50, key is test_row_0/A:col10/1732368132024/Put/seqid=0 2024-11-23T13:22:13,290 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): be94e75499ad728566c6b2d31ebaacd4#B#compaction#279 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:22:13,290 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/902c456654b2475bad26f186c91dde5a is 50, key is test_row_0/B:col10/1732368132024/Put/seqid=0 2024-11-23T13:22:13,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742152_1328 (size=12663) 2024-11-23T13:22:13,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742153_1329 (size=12663) 2024-11-23T13:22:13,307 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/902c456654b2475bad26f186c91dde5a as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/902c456654b2475bad26f186c91dde5a 2024-11-23T13:22:13,311 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in be94e75499ad728566c6b2d31ebaacd4/B of be94e75499ad728566c6b2d31ebaacd4 into 902c456654b2475bad26f186c91dde5a(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:22:13,312 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for be94e75499ad728566c6b2d31ebaacd4: 2024-11-23T13:22:13,312 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4., storeName=be94e75499ad728566c6b2d31ebaacd4/B, priority=13, startTime=1732368133277; duration=0sec 2024-11-23T13:22:13,312 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:22:13,312 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: be94e75499ad728566c6b2d31ebaacd4:B 2024-11-23T13:22:13,312 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:22:13,313 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34469 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:22:13,313 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): be94e75499ad728566c6b2d31ebaacd4/C is initiating minor compaction (all files) 2024-11-23T13:22:13,313 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of be94e75499ad728566c6b2d31ebaacd4/C in TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 
2024-11-23T13:22:13,313 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/bc26ecba3e3c4c45b01c289b8dc5f9fa, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/72e8b5f7ee7c496f98524708def9a9ac, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/5d571b3d022d40f7bb216a7670e9cee8] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp, totalSize=33.7 K 2024-11-23T13:22:13,314 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting bc26ecba3e3c4c45b01c289b8dc5f9fa, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1732368127590 2024-11-23T13:22:13,314 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 72e8b5f7ee7c496f98524708def9a9ac, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1732368129878 2024-11-23T13:22:13,314 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 5d571b3d022d40f7bb216a7670e9cee8, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1732368132012 2024-11-23T13:22:13,321 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): be94e75499ad728566c6b2d31ebaacd4#C#compaction#280 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:22:13,322 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/18a275f78be84c39b77491a3bcd2374a is 50, key is test_row_0/C:col10/1732368132024/Put/seqid=0 2024-11-23T13:22:13,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742154_1330 (size=12663) 2024-11-23T13:22:13,327 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:13,328 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-23T13:22:13,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 
2024-11-23T13:22:13,328 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2837): Flushing be94e75499ad728566c6b2d31ebaacd4 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-23T13:22:13,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=A 2024-11-23T13:22:13,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:13,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=B 2024-11-23T13:22:13,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:13,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=C 2024-11-23T13:22:13,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:13,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/d4f7c81eea9441078a4092d59a5ffaf7 is 50, key is test_row_0/A:col10/1732368132073/Put/seqid=0 2024-11-23T13:22:13,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742155_1331 (size=12151) 2024-11-23T13:22:13,338 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=230 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/d4f7c81eea9441078a4092d59a5ffaf7 2024-11-23T13:22:13,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/0a044195d545490f8d68c87ba0fb565a is 50, key is test_row_0/B:col10/1732368132073/Put/seqid=0 2024-11-23T13:22:13,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742156_1332 (size=12151) 2024-11-23T13:22:13,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-23T13:22:13,703 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/d99c51be248941b09be4c4df7a80f287 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/d99c51be248941b09be4c4df7a80f287 2024-11-23T13:22:13,708 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in be94e75499ad728566c6b2d31ebaacd4/A of be94e75499ad728566c6b2d31ebaacd4 into d99c51be248941b09be4c4df7a80f287(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:22:13,709 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for be94e75499ad728566c6b2d31ebaacd4: 2024-11-23T13:22:13,709 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4., storeName=be94e75499ad728566c6b2d31ebaacd4/A, priority=13, startTime=1732368133277; duration=0sec 2024-11-23T13:22:13,709 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:22:13,709 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: be94e75499ad728566c6b2d31ebaacd4:A 2024-11-23T13:22:13,731 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/18a275f78be84c39b77491a3bcd2374a as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/18a275f78be84c39b77491a3bcd2374a 2024-11-23T13:22:13,736 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in be94e75499ad728566c6b2d31ebaacd4/C of be94e75499ad728566c6b2d31ebaacd4 into 18a275f78be84c39b77491a3bcd2374a(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T13:22:13,736 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for be94e75499ad728566c6b2d31ebaacd4: 2024-11-23T13:22:13,737 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4., storeName=be94e75499ad728566c6b2d31ebaacd4/C, priority=13, startTime=1732368133277; duration=0sec 2024-11-23T13:22:13,737 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:22:13,737 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: be94e75499ad728566c6b2d31ebaacd4:C 2024-11-23T13:22:13,750 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=230 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/0a044195d545490f8d68c87ba0fb565a 2024-11-23T13:22:13,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/a008f5367a384d019722274fd996e2f1 is 50, key is test_row_0/C:col10/1732368132073/Put/seqid=0 2024-11-23T13:22:13,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742157_1333 (size=12151) 2024-11-23T13:22:14,162 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=230 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/a008f5367a384d019722274fd996e2f1 2024-11-23T13:22:14,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/d4f7c81eea9441078a4092d59a5ffaf7 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/d4f7c81eea9441078a4092d59a5ffaf7 2024-11-23T13:22:14,172 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/d4f7c81eea9441078a4092d59a5ffaf7, entries=150, sequenceid=230, filesize=11.9 K 2024-11-23T13:22:14,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/0a044195d545490f8d68c87ba0fb565a as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/0a044195d545490f8d68c87ba0fb565a 2024-11-23T13:22:14,177 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/0a044195d545490f8d68c87ba0fb565a, entries=150, sequenceid=230, filesize=11.9 K 2024-11-23T13:22:14,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/a008f5367a384d019722274fd996e2f1 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/a008f5367a384d019722274fd996e2f1 2024-11-23T13:22:14,182 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/a008f5367a384d019722274fd996e2f1, entries=150, sequenceid=230, filesize=11.9 K 2024-11-23T13:22:14,183 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=0 B/0 for be94e75499ad728566c6b2d31ebaacd4 in 854ms, sequenceid=230, compaction requested=false 2024-11-23T13:22:14,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2538): Flush status journal for be94e75499ad728566c6b2d31ebaacd4: 2024-11-23T13:22:14,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 
2024-11-23T13:22:14,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-11-23T13:22:14,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4106): Remote procedure done, pid=80 2024-11-23T13:22:14,186 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=80, resume processing ppid=79 2024-11-23T13:22:14,186 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, ppid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7730 sec 2024-11-23T13:22:14,187 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees in 1.7770 sec 2024-11-23T13:22:14,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on be94e75499ad728566c6b2d31ebaacd4 2024-11-23T13:22:14,206 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing be94e75499ad728566c6b2d31ebaacd4 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-23T13:22:14,206 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=A 2024-11-23T13:22:14,206 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:14,206 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=B 2024-11-23T13:22:14,207 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:14,207 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=C 2024-11-23T13:22:14,207 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:14,210 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/9e79602dfc7f435cbf4b82c65f2334a6 is 50, key is test_row_0/A:col10/1732368134205/Put/seqid=0 2024-11-23T13:22:14,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742158_1334 (size=14541) 2024-11-23T13:22:14,219 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=243 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/9e79602dfc7f435cbf4b82c65f2334a6 2024-11-23T13:22:14,225 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/b900df02610b4e708b0046de0f3e7735 is 50, key is test_row_0/B:col10/1732368134205/Put/seqid=0 2024-11-23T13:22:14,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742159_1335 
(size=12151) 2024-11-23T13:22:14,230 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=243 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/b900df02610b4e708b0046de0f3e7735 2024-11-23T13:22:14,237 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/38d3f5ce36d84205899dfb59d5c67a7d is 50, key is test_row_0/C:col10/1732368134205/Put/seqid=0 2024-11-23T13:22:14,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742160_1336 (size=12151) 2024-11-23T13:22:14,272 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:14,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 245 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368194270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:14,374 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:14,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 247 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368194373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:14,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-23T13:22:14,520 INFO [Thread-1287 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 79 completed 2024-11-23T13:22:14,521 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T13:22:14,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees 2024-11-23T13:22:14,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-23T13:22:14,523 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T13:22:14,524 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T13:22:14,524 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T13:22:14,577 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:14,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 249 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368194576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:14,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-23T13:22:14,641 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=243 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/38d3f5ce36d84205899dfb59d5c67a7d 2024-11-23T13:22:14,645 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/9e79602dfc7f435cbf4b82c65f2334a6 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/9e79602dfc7f435cbf4b82c65f2334a6 2024-11-23T13:22:14,649 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/9e79602dfc7f435cbf4b82c65f2334a6, entries=200, sequenceid=243, filesize=14.2 K 2024-11-23T13:22:14,650 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/b900df02610b4e708b0046de0f3e7735 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/b900df02610b4e708b0046de0f3e7735 2024-11-23T13:22:14,654 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/b900df02610b4e708b0046de0f3e7735, entries=150, sequenceid=243, filesize=11.9 K 2024-11-23T13:22:14,655 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/38d3f5ce36d84205899dfb59d5c67a7d as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/38d3f5ce36d84205899dfb59d5c67a7d 2024-11-23T13:22:14,659 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/38d3f5ce36d84205899dfb59d5c67a7d, entries=150, sequenceid=243, filesize=11.9 K 2024-11-23T13:22:14,660 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for be94e75499ad728566c6b2d31ebaacd4 in 454ms, sequenceid=243, compaction requested=true 2024-11-23T13:22:14,660 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for be94e75499ad728566c6b2d31ebaacd4: 2024-11-23T13:22:14,660 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store be94e75499ad728566c6b2d31ebaacd4:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T13:22:14,660 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:22:14,660 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store be94e75499ad728566c6b2d31ebaacd4:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T13:22:14,660 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:22:14,660 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:22:14,660 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store be94e75499ad728566c6b2d31ebaacd4:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T13:22:14,660 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:22:14,660 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:22:14,661 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39355 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:22:14,661 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:22:14,661 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): be94e75499ad728566c6b2d31ebaacd4/B is initiating minor compaction (all files) 
2024-11-23T13:22:14,661 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): be94e75499ad728566c6b2d31ebaacd4/A is initiating minor compaction (all files) 2024-11-23T13:22:14,661 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of be94e75499ad728566c6b2d31ebaacd4/A in TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:14,661 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of be94e75499ad728566c6b2d31ebaacd4/B in TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:14,661 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/d99c51be248941b09be4c4df7a80f287, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/d4f7c81eea9441078a4092d59a5ffaf7, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/9e79602dfc7f435cbf4b82c65f2334a6] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp, totalSize=38.4 K 2024-11-23T13:22:14,661 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/902c456654b2475bad26f186c91dde5a, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/0a044195d545490f8d68c87ba0fb565a, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/b900df02610b4e708b0046de0f3e7735] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp, totalSize=36.1 K 2024-11-23T13:22:14,662 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting d99c51be248941b09be4c4df7a80f287, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1732368129884 2024-11-23T13:22:14,662 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 902c456654b2475bad26f186c91dde5a, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1732368129884 2024-11-23T13:22:14,664 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 0a044195d545490f8d68c87ba0fb565a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=230, earliestPutTs=1732368132061 2024-11-23T13:22:14,664 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting d4f7c81eea9441078a4092d59a5ffaf7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=230, earliestPutTs=1732368132061 2024-11-23T13:22:14,664 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 
b900df02610b4e708b0046de0f3e7735, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=243, earliestPutTs=1732368134201 2024-11-23T13:22:14,664 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9e79602dfc7f435cbf4b82c65f2334a6, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=243, earliestPutTs=1732368134193 2024-11-23T13:22:14,672 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): be94e75499ad728566c6b2d31ebaacd4#B#compaction#287 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:22:14,673 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/fe42c14beddf4730beabdead36c823ea is 50, key is test_row_0/B:col10/1732368134205/Put/seqid=0 2024-11-23T13:22:14,674 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): be94e75499ad728566c6b2d31ebaacd4#A#compaction#288 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:22:14,675 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/e921cb0fdaad4f9a898dfaba9a616a7f is 50, key is test_row_0/A:col10/1732368134205/Put/seqid=0 2024-11-23T13:22:14,675 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:14,676 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-23T13:22:14,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 
2024-11-23T13:22:14,676 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2837): Flushing be94e75499ad728566c6b2d31ebaacd4 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-23T13:22:14,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=A 2024-11-23T13:22:14,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:14,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=B 2024-11-23T13:22:14,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:14,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=C 2024-11-23T13:22:14,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:14,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742161_1337 (size=12765) 2024-11-23T13:22:14,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/61ecbceeb1dd4bb48119d117c970bcb7 is 50, key is test_row_0/A:col10/1732368134269/Put/seqid=0 2024-11-23T13:22:14,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742162_1338 (size=12765) 2024-11-23T13:22:14,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742163_1339 (size=12251) 2024-11-23T13:22:14,730 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=268 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/61ecbceeb1dd4bb48119d117c970bcb7 2024-11-23T13:22:14,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/a204db3c00354527999733eb9410ed79 is 50, key is test_row_0/B:col10/1732368134269/Put/seqid=0 2024-11-23T13:22:14,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742164_1340 (size=12251) 2024-11-23T13:22:14,742 INFO 
[RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=268 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/a204db3c00354527999733eb9410ed79 2024-11-23T13:22:14,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/5d490c12c2ca486b89986d5e5c13e502 is 50, key is test_row_0/C:col10/1732368134269/Put/seqid=0 2024-11-23T13:22:14,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742165_1341 (size=12251) 2024-11-23T13:22:14,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-23T13:22:14,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on be94e75499ad728566c6b2d31ebaacd4 2024-11-23T13:22:14,882 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. as already flushing 2024-11-23T13:22:14,900 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:14,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 259 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368194898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:15,003 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:15,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 261 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368195001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:15,088 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/fe42c14beddf4730beabdead36c823ea as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/fe42c14beddf4730beabdead36c823ea 2024-11-23T13:22:15,092 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in be94e75499ad728566c6b2d31ebaacd4/B of be94e75499ad728566c6b2d31ebaacd4 into fe42c14beddf4730beabdead36c823ea(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T13:22:15,092 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for be94e75499ad728566c6b2d31ebaacd4: 2024-11-23T13:22:15,092 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4., storeName=be94e75499ad728566c6b2d31ebaacd4/B, priority=13, startTime=1732368134660; duration=0sec 2024-11-23T13:22:15,092 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:22:15,093 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: be94e75499ad728566c6b2d31ebaacd4:B 2024-11-23T13:22:15,093 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:22:15,093 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:22:15,094 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): be94e75499ad728566c6b2d31ebaacd4/C is initiating minor compaction (all files) 2024-11-23T13:22:15,094 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of be94e75499ad728566c6b2d31ebaacd4/C in TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:15,094 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/18a275f78be84c39b77491a3bcd2374a, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/a008f5367a384d019722274fd996e2f1, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/38d3f5ce36d84205899dfb59d5c67a7d] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp, totalSize=36.1 K 2024-11-23T13:22:15,094 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 18a275f78be84c39b77491a3bcd2374a, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1732368129884 2024-11-23T13:22:15,094 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting a008f5367a384d019722274fd996e2f1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=230, earliestPutTs=1732368132061 2024-11-23T13:22:15,095 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 38d3f5ce36d84205899dfb59d5c67a7d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=243, earliestPutTs=1732368134201 2024-11-23T13:22:15,102 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
be94e75499ad728566c6b2d31ebaacd4#C#compaction#292 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:22:15,102 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/ae1aa846ca304976acfa903ca92c32dd is 50, key is test_row_0/C:col10/1732368134205/Put/seqid=0 2024-11-23T13:22:15,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742166_1342 (size=12765) 2024-11-23T13:22:15,119 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/e921cb0fdaad4f9a898dfaba9a616a7f as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/e921cb0fdaad4f9a898dfaba9a616a7f 2024-11-23T13:22:15,124 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in be94e75499ad728566c6b2d31ebaacd4/A of be94e75499ad728566c6b2d31ebaacd4 into e921cb0fdaad4f9a898dfaba9a616a7f(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:22:15,124 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for be94e75499ad728566c6b2d31ebaacd4: 2024-11-23T13:22:15,124 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4., storeName=be94e75499ad728566c6b2d31ebaacd4/A, priority=13, startTime=1732368134660; duration=0sec 2024-11-23T13:22:15,124 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:22:15,124 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: be94e75499ad728566c6b2d31ebaacd4:A 2024-11-23T13:22:15,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-23T13:22:15,155 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=268 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/5d490c12c2ca486b89986d5e5c13e502 2024-11-23T13:22:15,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/61ecbceeb1dd4bb48119d117c970bcb7 as 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/61ecbceeb1dd4bb48119d117c970bcb7 2024-11-23T13:22:15,165 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/61ecbceeb1dd4bb48119d117c970bcb7, entries=150, sequenceid=268, filesize=12.0 K 2024-11-23T13:22:15,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/a204db3c00354527999733eb9410ed79 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/a204db3c00354527999733eb9410ed79 2024-11-23T13:22:15,170 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/a204db3c00354527999733eb9410ed79, entries=150, sequenceid=268, filesize=12.0 K 2024-11-23T13:22:15,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/5d490c12c2ca486b89986d5e5c13e502 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/5d490c12c2ca486b89986d5e5c13e502 2024-11-23T13:22:15,175 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/5d490c12c2ca486b89986d5e5c13e502, entries=150, sequenceid=268, filesize=12.0 K 2024-11-23T13:22:15,176 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for be94e75499ad728566c6b2d31ebaacd4 in 500ms, sequenceid=268, compaction requested=false 2024-11-23T13:22:15,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2538): Flush status journal for be94e75499ad728566c6b2d31ebaacd4: 2024-11-23T13:22:15,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 
2024-11-23T13:22:15,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=82 2024-11-23T13:22:15,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4106): Remote procedure done, pid=82 2024-11-23T13:22:15,179 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=82, resume processing ppid=81 2024-11-23T13:22:15,179 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, ppid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 653 msec 2024-11-23T13:22:15,181 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees in 659 msec 2024-11-23T13:22:15,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on be94e75499ad728566c6b2d31ebaacd4 2024-11-23T13:22:15,206 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing be94e75499ad728566c6b2d31ebaacd4 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-23T13:22:15,206 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=A 2024-11-23T13:22:15,207 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:15,207 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=B 2024-11-23T13:22:15,207 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:15,207 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=C 2024-11-23T13:22:15,207 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:15,211 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/e3650080f80d42fea4f58dd2c854f6b5 is 50, key is test_row_0/A:col10/1732368134885/Put/seqid=0 2024-11-23T13:22:15,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742167_1343 (size=14741) 2024-11-23T13:22:15,266 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:15,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 285 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368195264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:15,368 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:15,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 287 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368195367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:15,514 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/ae1aa846ca304976acfa903ca92c32dd as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/ae1aa846ca304976acfa903ca92c32dd 2024-11-23T13:22:15,519 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in be94e75499ad728566c6b2d31ebaacd4/C of be94e75499ad728566c6b2d31ebaacd4 into ae1aa846ca304976acfa903ca92c32dd(size=12.5 K), total size for store is 24.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:22:15,519 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for be94e75499ad728566c6b2d31ebaacd4: 2024-11-23T13:22:15,519 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4., storeName=be94e75499ad728566c6b2d31ebaacd4/C, priority=13, startTime=1732368134660; duration=0sec 2024-11-23T13:22:15,519 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:22:15,519 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: be94e75499ad728566c6b2d31ebaacd4:C 2024-11-23T13:22:15,570 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:15,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 289 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368195569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:15,618 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=282 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/e3650080f80d42fea4f58dd2c854f6b5 2024-11-23T13:22:15,625 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/e89ab1a840b94ea9927ea15715457e17 is 50, key is test_row_0/B:col10/1732368134885/Put/seqid=0 2024-11-23T13:22:15,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-23T13:22:15,626 INFO [Thread-1287 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 81 completed 2024-11-23T13:22:15,629 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T13:22:15,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees 2024-11-23T13:22:15,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-23T13:22:15,630 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T13:22:15,631 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T13:22:15,631 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T13:22:15,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742168_1344 (size=12301) 2024-11-23T13:22:15,632 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=282 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/e89ab1a840b94ea9927ea15715457e17 2024-11-23T13:22:15,640 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/bc448575177842e38d8c0cd02a2f54a5 is 50, key is test_row_0/C:col10/1732368134885/Put/seqid=0 2024-11-23T13:22:15,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742169_1345 (size=12301) 2024-11-23T13:22:15,647 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=282 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/bc448575177842e38d8c0cd02a2f54a5 2024-11-23T13:22:15,651 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/e3650080f80d42fea4f58dd2c854f6b5 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/e3650080f80d42fea4f58dd2c854f6b5 2024-11-23T13:22:15,656 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/e3650080f80d42fea4f58dd2c854f6b5, entries=200, sequenceid=282, filesize=14.4 K 2024-11-23T13:22:15,656 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/e89ab1a840b94ea9927ea15715457e17 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/e89ab1a840b94ea9927ea15715457e17 2024-11-23T13:22:15,660 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/e89ab1a840b94ea9927ea15715457e17, entries=150, sequenceid=282, filesize=12.0 K 2024-11-23T13:22:15,661 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/bc448575177842e38d8c0cd02a2f54a5 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/bc448575177842e38d8c0cd02a2f54a5 2024-11-23T13:22:15,664 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/bc448575177842e38d8c0cd02a2f54a5, entries=150, sequenceid=282, filesize=12.0 K 2024-11-23T13:22:15,665 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for be94e75499ad728566c6b2d31ebaacd4 in 459ms, sequenceid=282, compaction requested=true 2024-11-23T13:22:15,665 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for be94e75499ad728566c6b2d31ebaacd4: 2024-11-23T13:22:15,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store be94e75499ad728566c6b2d31ebaacd4:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T13:22:15,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:22:15,665 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:22:15,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store be94e75499ad728566c6b2d31ebaacd4:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T13:22:15,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:22:15,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store be94e75499ad728566c6b2d31ebaacd4:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T13:22:15,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:22:15,665 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:22:15,667 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37317 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:22:15,667 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): be94e75499ad728566c6b2d31ebaacd4/B is initiating minor compaction (all files) 2024-11-23T13:22:15,667 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of be94e75499ad728566c6b2d31ebaacd4/B in TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 
2024-11-23T13:22:15,667 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/fe42c14beddf4730beabdead36c823ea, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/a204db3c00354527999733eb9410ed79, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/e89ab1a840b94ea9927ea15715457e17] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp, totalSize=36.4 K 2024-11-23T13:22:15,667 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39757 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:22:15,667 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): be94e75499ad728566c6b2d31ebaacd4/A is initiating minor compaction (all files) 2024-11-23T13:22:15,667 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of be94e75499ad728566c6b2d31ebaacd4/A in TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:15,668 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting fe42c14beddf4730beabdead36c823ea, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=243, earliestPutTs=1732368134201 2024-11-23T13:22:15,668 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/e921cb0fdaad4f9a898dfaba9a616a7f, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/61ecbceeb1dd4bb48119d117c970bcb7, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/e3650080f80d42fea4f58dd2c854f6b5] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp, totalSize=38.8 K 2024-11-23T13:22:15,668 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting a204db3c00354527999733eb9410ed79, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=268, earliestPutTs=1732368134230 2024-11-23T13:22:15,668 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting e921cb0fdaad4f9a898dfaba9a616a7f, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=243, earliestPutTs=1732368134201 2024-11-23T13:22:15,668 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting e89ab1a840b94ea9927ea15715457e17, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1732368134885 2024-11-23T13:22:15,668 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting 61ecbceeb1dd4bb48119d117c970bcb7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=268, earliestPutTs=1732368134230 2024-11-23T13:22:15,669 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting e3650080f80d42fea4f58dd2c854f6b5, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1732368134885 2024-11-23T13:22:15,675 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): be94e75499ad728566c6b2d31ebaacd4#A#compaction#296 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:22:15,676 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/556e36a3228a4855867d2110efe4ad7f is 50, key is test_row_0/A:col10/1732368134885/Put/seqid=0 2024-11-23T13:22:15,678 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): be94e75499ad728566c6b2d31ebaacd4#B#compaction#297 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:22:15,679 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/7fe4eb891264455ebd07b3b40a1e0c38 is 50, key is test_row_0/B:col10/1732368134885/Put/seqid=0 2024-11-23T13:22:15,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742170_1346 (size=13017) 2024-11-23T13:22:15,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742171_1347 (size=13017) 2024-11-23T13:22:15,699 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/7fe4eb891264455ebd07b3b40a1e0c38 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/7fe4eb891264455ebd07b3b40a1e0c38 2024-11-23T13:22:15,704 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in be94e75499ad728566c6b2d31ebaacd4/B of be94e75499ad728566c6b2d31ebaacd4 into 7fe4eb891264455ebd07b3b40a1e0c38(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T13:22:15,704 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for be94e75499ad728566c6b2d31ebaacd4: 2024-11-23T13:22:15,704 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4., storeName=be94e75499ad728566c6b2d31ebaacd4/B, priority=13, startTime=1732368135665; duration=0sec 2024-11-23T13:22:15,704 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:22:15,704 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: be94e75499ad728566c6b2d31ebaacd4:B 2024-11-23T13:22:15,704 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:22:15,705 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37317 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:22:15,705 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): be94e75499ad728566c6b2d31ebaacd4/C is initiating minor compaction (all files) 2024-11-23T13:22:15,705 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of be94e75499ad728566c6b2d31ebaacd4/C in TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:15,705 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/ae1aa846ca304976acfa903ca92c32dd, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/5d490c12c2ca486b89986d5e5c13e502, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/bc448575177842e38d8c0cd02a2f54a5] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp, totalSize=36.4 K 2024-11-23T13:22:15,706 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting ae1aa846ca304976acfa903ca92c32dd, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=243, earliestPutTs=1732368134201 2024-11-23T13:22:15,706 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 5d490c12c2ca486b89986d5e5c13e502, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=268, earliestPutTs=1732368134230 2024-11-23T13:22:15,706 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting bc448575177842e38d8c0cd02a2f54a5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1732368134885 2024-11-23T13:22:15,713 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
be94e75499ad728566c6b2d31ebaacd4#C#compaction#298 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:22:15,714 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/b63580f2144b4124a33aa2f5d406a7f3 is 50, key is test_row_0/C:col10/1732368134885/Put/seqid=0 2024-11-23T13:22:15,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742172_1348 (size=13017) 2024-11-23T13:22:15,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-23T13:22:15,783 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:15,783 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-23T13:22:15,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:15,784 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2837): Flushing be94e75499ad728566c6b2d31ebaacd4 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-23T13:22:15,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=A 2024-11-23T13:22:15,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:15,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=B 2024-11-23T13:22:15,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:15,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=C 2024-11-23T13:22:15,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:15,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/623eb845659c42dba9b99ae959783e70 is 50, key is test_row_0/A:col10/1732368135260/Put/seqid=0 2024-11-23T13:22:15,793 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742173_1349 (size=12301) 2024-11-23T13:22:15,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on be94e75499ad728566c6b2d31ebaacd4 2024-11-23T13:22:15,875 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. as already flushing 2024-11-23T13:22:15,896 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:15,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 300 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368195895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:15,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-23T13:22:15,999 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:15,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 302 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368195998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:16,099 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/556e36a3228a4855867d2110efe4ad7f as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/556e36a3228a4855867d2110efe4ad7f 2024-11-23T13:22:16,103 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in be94e75499ad728566c6b2d31ebaacd4/A of be94e75499ad728566c6b2d31ebaacd4 into 556e36a3228a4855867d2110efe4ad7f(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:22:16,103 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for be94e75499ad728566c6b2d31ebaacd4: 2024-11-23T13:22:16,103 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4., storeName=be94e75499ad728566c6b2d31ebaacd4/A, priority=13, startTime=1732368135665; duration=0sec 2024-11-23T13:22:16,103 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:22:16,103 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: be94e75499ad728566c6b2d31ebaacd4:A 2024-11-23T13:22:16,123 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/b63580f2144b4124a33aa2f5d406a7f3 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/b63580f2144b4124a33aa2f5d406a7f3 2024-11-23T13:22:16,128 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in be94e75499ad728566c6b2d31ebaacd4/C of be94e75499ad728566c6b2d31ebaacd4 into b63580f2144b4124a33aa2f5d406a7f3(size=12.7 K), total size for store is 12.7 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:22:16,129 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for be94e75499ad728566c6b2d31ebaacd4: 2024-11-23T13:22:16,129 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4., storeName=be94e75499ad728566c6b2d31ebaacd4/C, priority=13, startTime=1732368135665; duration=0sec 2024-11-23T13:22:16,129 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:22:16,129 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: be94e75499ad728566c6b2d31ebaacd4:C 2024-11-23T13:22:16,194 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=308 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/623eb845659c42dba9b99ae959783e70 2024-11-23T13:22:16,202 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:16,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 304 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368196201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:16,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/421dfe5a6894403fa7e14960f7caf554 is 50, key is test_row_0/B:col10/1732368135260/Put/seqid=0 2024-11-23T13:22:16,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742174_1350 (size=12301) 2024-11-23T13:22:16,226 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=308 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/421dfe5a6894403fa7e14960f7caf554 2024-11-23T13:22:16,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-23T13:22:16,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/ea31157852e44b03a7263dd83d4412de is 50, key is test_row_0/C:col10/1732368135260/Put/seqid=0 2024-11-23T13:22:16,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742175_1351 (size=12301) 2024-11-23T13:22:16,506 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:16,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 306 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368196505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:16,652 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=308 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/ea31157852e44b03a7263dd83d4412de 2024-11-23T13:22:16,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/623eb845659c42dba9b99ae959783e70 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/623eb845659c42dba9b99ae959783e70 2024-11-23T13:22:16,661 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/623eb845659c42dba9b99ae959783e70, entries=150, sequenceid=308, filesize=12.0 K 2024-11-23T13:22:16,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/421dfe5a6894403fa7e14960f7caf554 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/421dfe5a6894403fa7e14960f7caf554 2024-11-23T13:22:16,665 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/421dfe5a6894403fa7e14960f7caf554, entries=150, sequenceid=308, filesize=12.0 K 2024-11-23T13:22:16,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 
{event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/ea31157852e44b03a7263dd83d4412de as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/ea31157852e44b03a7263dd83d4412de 2024-11-23T13:22:16,670 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/ea31157852e44b03a7263dd83d4412de, entries=150, sequenceid=308, filesize=12.0 K 2024-11-23T13:22:16,671 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for be94e75499ad728566c6b2d31ebaacd4 in 887ms, sequenceid=308, compaction requested=false 2024-11-23T13:22:16,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2538): Flush status journal for be94e75499ad728566c6b2d31ebaacd4: 2024-11-23T13:22:16,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:16,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=84 2024-11-23T13:22:16,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4106): Remote procedure done, pid=84 2024-11-23T13:22:16,673 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=83 2024-11-23T13:22:16,674 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0410 sec 2024-11-23T13:22:16,675 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees in 1.0450 sec 2024-11-23T13:22:16,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-23T13:22:16,734 INFO [Thread-1287 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 83 completed 2024-11-23T13:22:16,735 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T13:22:16,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees 2024-11-23T13:22:16,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-23T13:22:16,736 INFO [PEWorker-3 {}] 
procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T13:22:16,737 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T13:22:16,737 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=86, ppid=85, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T13:22:16,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-23T13:22:16,888 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:16,889 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-23T13:22:16,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:16,889 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2837): Flushing be94e75499ad728566c6b2d31ebaacd4 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-23T13:22:16,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=A 2024-11-23T13:22:16,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:16,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=B 2024-11-23T13:22:16,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:16,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=C 2024-11-23T13:22:16,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:16,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/905b865c012e488b9f1a595362dc5bab is 50, key is test_row_0/A:col10/1732368135894/Put/seqid=0 2024-11-23T13:22:16,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742176_1352 (size=12301) 2024-11-23T13:22:17,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on be94e75499ad728566c6b2d31ebaacd4 2024-11-23T13:22:17,009 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. as already flushing 2024-11-23T13:22:17,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-23T13:22:17,056 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:17,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 329 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368197054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:17,159 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:17,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 331 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368197158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:17,299 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=322 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/905b865c012e488b9f1a595362dc5bab 2024-11-23T13:22:17,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/243b8fb2ad1748f19417fb8383fa62cd is 50, key is test_row_0/B:col10/1732368135894/Put/seqid=0 2024-11-23T13:22:17,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742177_1353 (size=12301) 2024-11-23T13:22:17,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-23T13:22:17,363 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:17,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 333 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368197361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:17,665 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:17,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 335 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368197664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:17,713 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=322 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/243b8fb2ad1748f19417fb8383fa62cd 2024-11-23T13:22:17,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/458ecaf1e6e849739d375e50124f23f9 is 50, key is test_row_0/C:col10/1732368135894/Put/seqid=0 2024-11-23T13:22:17,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742178_1354 (size=12301) 2024-11-23T13:22:17,725 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=322 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/458ecaf1e6e849739d375e50124f23f9 2024-11-23T13:22:17,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/905b865c012e488b9f1a595362dc5bab as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/905b865c012e488b9f1a595362dc5bab 2024-11-23T13:22:17,732 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/905b865c012e488b9f1a595362dc5bab, entries=150, sequenceid=322, filesize=12.0 K 2024-11-23T13:22:17,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/243b8fb2ad1748f19417fb8383fa62cd as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/243b8fb2ad1748f19417fb8383fa62cd 2024-11-23T13:22:17,738 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/243b8fb2ad1748f19417fb8383fa62cd, entries=150, sequenceid=322, filesize=12.0 K 2024-11-23T13:22:17,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/458ecaf1e6e849739d375e50124f23f9 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/458ecaf1e6e849739d375e50124f23f9 2024-11-23T13:22:17,742 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/458ecaf1e6e849739d375e50124f23f9, entries=150, sequenceid=322, filesize=12.0 K 2024-11-23T13:22:17,743 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for be94e75499ad728566c6b2d31ebaacd4 in 854ms, sequenceid=322, compaction requested=true 2024-11-23T13:22:17,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2538): Flush status journal for be94e75499ad728566c6b2d31ebaacd4: 2024-11-23T13:22:17,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 
2024-11-23T13:22:17,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=86 2024-11-23T13:22:17,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4106): Remote procedure done, pid=86 2024-11-23T13:22:17,745 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=86, resume processing ppid=85 2024-11-23T13:22:17,745 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, ppid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0070 sec 2024-11-23T13:22:17,747 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees in 1.0110 sec 2024-11-23T13:22:17,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-23T13:22:17,839 INFO [Thread-1287 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 85 completed 2024-11-23T13:22:17,841 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T13:22:17,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees 2024-11-23T13:22:17,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-23T13:22:17,842 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T13:22:17,843 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T13:22:17,843 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=88, ppid=87, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T13:22:17,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-23T13:22:17,994 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:17,995 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-23T13:22:17,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 
2024-11-23T13:22:17,995 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2837): Flushing be94e75499ad728566c6b2d31ebaacd4 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-23T13:22:17,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=A 2024-11-23T13:22:17,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:17,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=B 2024-11-23T13:22:17,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:17,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=C 2024-11-23T13:22:17,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:18,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/dc7e210363c9477da8418f9bb8eaebf7 is 50, key is test_row_0/A:col10/1732368137053/Put/seqid=0 2024-11-23T13:22:18,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742179_1355 (size=12301) 2024-11-23T13:22:18,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-23T13:22:18,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on be94e75499ad728566c6b2d31ebaacd4 2024-11-23T13:22:18,170 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. as already flushing 2024-11-23T13:22:18,191 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:18,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 346 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368198190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:18,294 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:18,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 348 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368198293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:18,405 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=346 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/dc7e210363c9477da8418f9bb8eaebf7 2024-11-23T13:22:18,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/6e95de6c1b0842c5b2c4f2b2cd9ac1d5 is 50, key is test_row_0/B:col10/1732368137053/Put/seqid=0 2024-11-23T13:22:18,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742180_1356 (size=12301) 2024-11-23T13:22:18,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-23T13:22:18,496 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:18,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 350 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368198495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:18,799 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:18,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 352 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368198798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:18,816 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=346 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/6e95de6c1b0842c5b2c4f2b2cd9ac1d5 2024-11-23T13:22:18,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/2399ff80b4634830a2f9374e3ad5d4ed is 50, key is test_row_0/C:col10/1732368137053/Put/seqid=0 2024-11-23T13:22:18,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742181_1357 (size=12301) 2024-11-23T13:22:18,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-23T13:22:19,222 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:19,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37466 deadline: 1732368199221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:19,223 DEBUG [Thread-1283 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18194 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4., hostname=ba2e440802a7,33173,1732368061317, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T13:22:19,229 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=346 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/2399ff80b4634830a2f9374e3ad5d4ed 2024-11-23T13:22:19,233 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:19,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37486 deadline: 1732368199231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:19,234 DEBUG [Thread-1279 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18206 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4., hostname=ba2e440802a7,33173,1732368061317, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T13:22:19,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/dc7e210363c9477da8418f9bb8eaebf7 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/dc7e210363c9477da8418f9bb8eaebf7 2024-11-23T13:22:19,241 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/dc7e210363c9477da8418f9bb8eaebf7, entries=150, sequenceid=346, filesize=12.0 K 2024-11-23T13:22:19,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/6e95de6c1b0842c5b2c4f2b2cd9ac1d5 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/6e95de6c1b0842c5b2c4f2b2cd9ac1d5 2024-11-23T13:22:19,252 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/6e95de6c1b0842c5b2c4f2b2cd9ac1d5, entries=150, sequenceid=346, filesize=12.0 K 2024-11-23T13:22:19,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/2399ff80b4634830a2f9374e3ad5d4ed as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/2399ff80b4634830a2f9374e3ad5d4ed 2024-11-23T13:22:19,256 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:19,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37446 deadline: 1732368199253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:19,257 DEBUG [Thread-1285 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18226 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4., hostname=ba2e440802a7,33173,1732368061317, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T13:22:19,258 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/2399ff80b4634830a2f9374e3ad5d4ed, entries=150, sequenceid=346, filesize=12.0 K 2024-11-23T13:22:19,259 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 
be94e75499ad728566c6b2d31ebaacd4 in 1264ms, sequenceid=346, compaction requested=true 2024-11-23T13:22:19,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2538): Flush status journal for be94e75499ad728566c6b2d31ebaacd4: 2024-11-23T13:22:19,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:19,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=88 2024-11-23T13:22:19,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4106): Remote procedure done, pid=88 2024-11-23T13:22:19,261 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=88, resume processing ppid=87 2024-11-23T13:22:19,261 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, ppid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4170 sec 2024-11-23T13:22:19,263 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees in 1.4210 sec 2024-11-23T13:22:19,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on be94e75499ad728566c6b2d31ebaacd4 2024-11-23T13:22:19,269 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing be94e75499ad728566c6b2d31ebaacd4 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-23T13:22:19,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=A 2024-11-23T13:22:19,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:19,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=B 2024-11-23T13:22:19,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:19,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=C 2024-11-23T13:22:19,270 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:19,275 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/a77e414c78d641bfb277ca16dfc0b8b0 is 50, key is test_row_0/A:col10/1732368138189/Put/seqid=0 2024-11-23T13:22:19,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742182_1358 (size=12301) 2024-11-23T13:22:19,282 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=359 (bloomFilter=true), 
to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/a77e414c78d641bfb277ca16dfc0b8b0 2024-11-23T13:22:19,294 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/b31d11f17c9247ae953405541b27a040 is 50, key is test_row_0/B:col10/1732368138189/Put/seqid=0 2024-11-23T13:22:19,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742183_1359 (size=12301) 2024-11-23T13:22:19,327 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:19,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 360 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368199324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:19,328 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:19,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37458 deadline: 1732368199325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:19,430 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:19,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 362 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368199429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:19,431 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:19,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37458 deadline: 1732368199429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:19,634 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:19,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 364 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368199633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:19,634 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:19,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37458 deadline: 1732368199633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:19,706 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=359 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/b31d11f17c9247ae953405541b27a040 2024-11-23T13:22:19,713 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/8a7c64041b1a4de6b231bc3ae8fd982e is 50, key is test_row_0/C:col10/1732368138189/Put/seqid=0 2024-11-23T13:22:19,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742184_1360 (size=12301) 2024-11-23T13:22:19,905 DEBUG [Thread-1290 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x088aa519 to 127.0.0.1:51875 2024-11-23T13:22:19,905 DEBUG [Thread-1288 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x78b04266 to 127.0.0.1:51875 2024-11-23T13:22:19,905 DEBUG [Thread-1288 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:22:19,905 DEBUG [Thread-1290 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:22:19,906 DEBUG [Thread-1296 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2d1403c3 to 127.0.0.1:51875 2024-11-23T13:22:19,906 DEBUG [Thread-1296 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:22:19,907 DEBUG [Thread-1294 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2e4c79b8 to 127.0.0.1:51875 2024-11-23T13:22:19,907 DEBUG [Thread-1294 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:22:19,908 
DEBUG [Thread-1292 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5e998dd3 to 127.0.0.1:51875 2024-11-23T13:22:19,908 DEBUG [Thread-1292 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:22:19,935 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:19,935 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:19,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37458 deadline: 1732368199935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:19,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 366 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:37506 deadline: 1732368199935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:19,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-23T13:22:19,946 INFO [Thread-1287 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 87 completed 2024-11-23T13:22:20,118 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=359 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/8a7c64041b1a4de6b231bc3ae8fd982e 2024-11-23T13:22:20,121 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/a77e414c78d641bfb277ca16dfc0b8b0 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/a77e414c78d641bfb277ca16dfc0b8b0 2024-11-23T13:22:20,124 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/a77e414c78d641bfb277ca16dfc0b8b0, entries=150, sequenceid=359, filesize=12.0 K 2024-11-23T13:22:20,125 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/b31d11f17c9247ae953405541b27a040 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/b31d11f17c9247ae953405541b27a040 2024-11-23T13:22:20,128 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/b31d11f17c9247ae953405541b27a040, entries=150, sequenceid=359, filesize=12.0 K 2024-11-23T13:22:20,128 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/8a7c64041b1a4de6b231bc3ae8fd982e as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/8a7c64041b1a4de6b231bc3ae8fd982e 2024-11-23T13:22:20,131 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/8a7c64041b1a4de6b231bc3ae8fd982e, entries=150, sequenceid=359, filesize=12.0 K 2024-11-23T13:22:20,132 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for be94e75499ad728566c6b2d31ebaacd4 in 863ms, sequenceid=359, compaction requested=true 2024-11-23T13:22:20,132 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for be94e75499ad728566c6b2d31ebaacd4: 2024-11-23T13:22:20,132 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store be94e75499ad728566c6b2d31ebaacd4:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T13:22:20,132 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-23T13:22:20,132 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:22:20,132 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store be94e75499ad728566c6b2d31ebaacd4:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T13:22:20,132 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:22:20,132 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store be94e75499ad728566c6b2d31ebaacd4:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T13:22:20,132 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:22:20,132 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-23T13:22:20,133 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 62221 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-23T13:22:20,133 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 
files of size 62221 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-23T13:22:20,133 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): be94e75499ad728566c6b2d31ebaacd4/A is initiating minor compaction (all files) 2024-11-23T13:22:20,134 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of be94e75499ad728566c6b2d31ebaacd4/A in TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:20,134 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): be94e75499ad728566c6b2d31ebaacd4/B is initiating minor compaction (all files) 2024-11-23T13:22:20,134 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/556e36a3228a4855867d2110efe4ad7f, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/623eb845659c42dba9b99ae959783e70, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/905b865c012e488b9f1a595362dc5bab, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/dc7e210363c9477da8418f9bb8eaebf7, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/a77e414c78d641bfb277ca16dfc0b8b0] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp, totalSize=60.8 K 2024-11-23T13:22:20,134 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of be94e75499ad728566c6b2d31ebaacd4/B in TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 
2024-11-23T13:22:20,134 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/7fe4eb891264455ebd07b3b40a1e0c38, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/421dfe5a6894403fa7e14960f7caf554, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/243b8fb2ad1748f19417fb8383fa62cd, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/6e95de6c1b0842c5b2c4f2b2cd9ac1d5, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/b31d11f17c9247ae953405541b27a040] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp, totalSize=60.8 K 2024-11-23T13:22:20,134 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 556e36a3228a4855867d2110efe4ad7f, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1732368134885 2024-11-23T13:22:20,134 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 7fe4eb891264455ebd07b3b40a1e0c38, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1732368134885 2024-11-23T13:22:20,134 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 623eb845659c42dba9b99ae959783e70, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=308, earliestPutTs=1732368135244 2024-11-23T13:22:20,134 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 421dfe5a6894403fa7e14960f7caf554, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=308, earliestPutTs=1732368135244 2024-11-23T13:22:20,134 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 905b865c012e488b9f1a595362dc5bab, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=322, earliestPutTs=1732368135889 2024-11-23T13:22:20,135 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 243b8fb2ad1748f19417fb8383fa62cd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=322, earliestPutTs=1732368135889 2024-11-23T13:22:20,135 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting dc7e210363c9477da8418f9bb8eaebf7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=346, earliestPutTs=1732368137048 2024-11-23T13:22:20,135 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 6e95de6c1b0842c5b2c4f2b2cd9ac1d5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=346, earliestPutTs=1732368137048 2024-11-23T13:22:20,135 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting a77e414c78d641bfb277ca16dfc0b8b0, keycount=150, bloomtype=ROW, size=12.0 K, 
encoding=NONE, compression=NONE, seqNum=359, earliestPutTs=1732368138178 2024-11-23T13:22:20,135 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting b31d11f17c9247ae953405541b27a040, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=359, earliestPutTs=1732368138178 2024-11-23T13:22:20,142 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): be94e75499ad728566c6b2d31ebaacd4#A#compaction#311 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:22:20,143 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): be94e75499ad728566c6b2d31ebaacd4#B#compaction#312 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:22:20,143 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/c9553f5b9bfc46a8b65fa1217163bb1a is 50, key is test_row_0/A:col10/1732368138189/Put/seqid=0 2024-11-23T13:22:20,143 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/3dede5b9e85945f6921ceba53caede0a is 50, key is test_row_0/B:col10/1732368138189/Put/seqid=0 2024-11-23T13:22:20,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742186_1362 (size=13187) 2024-11-23T13:22:20,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742185_1361 (size=13187) 2024-11-23T13:22:20,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on be94e75499ad728566c6b2d31ebaacd4 2024-11-23T13:22:20,438 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing be94e75499ad728566c6b2d31ebaacd4 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-23T13:22:20,438 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=A 2024-11-23T13:22:20,438 DEBUG [Thread-1277 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x44645c55 to 127.0.0.1:51875 2024-11-23T13:22:20,438 DEBUG [Thread-1277 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:22:20,438 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:20,438 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=B 2024-11-23T13:22:20,438 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:20,438 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=C 2024-11-23T13:22:20,438 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new 
segment=null 2024-11-23T13:22:20,442 DEBUG [Thread-1281 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x683b64c3 to 127.0.0.1:51875 2024-11-23T13:22:20,442 DEBUG [Thread-1281 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:22:20,442 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/690b02a5692f4a818db88a1f088df98c is 50, key is test_row_0/A:col10/1732368140437/Put/seqid=0 2024-11-23T13:22:20,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742187_1363 (size=12301) 2024-11-23T13:22:20,551 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/3dede5b9e85945f6921ceba53caede0a as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/3dede5b9e85945f6921ceba53caede0a 2024-11-23T13:22:20,551 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/c9553f5b9bfc46a8b65fa1217163bb1a as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/c9553f5b9bfc46a8b65fa1217163bb1a 2024-11-23T13:22:20,554 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in be94e75499ad728566c6b2d31ebaacd4/B of be94e75499ad728566c6b2d31ebaacd4 into 3dede5b9e85945f6921ceba53caede0a(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:22:20,554 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in be94e75499ad728566c6b2d31ebaacd4/A of be94e75499ad728566c6b2d31ebaacd4 into c9553f5b9bfc46a8b65fa1217163bb1a(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T13:22:20,554 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for be94e75499ad728566c6b2d31ebaacd4: 2024-11-23T13:22:20,554 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for be94e75499ad728566c6b2d31ebaacd4: 2024-11-23T13:22:20,554 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4., storeName=be94e75499ad728566c6b2d31ebaacd4/A, priority=11, startTime=1732368140132; duration=0sec 2024-11-23T13:22:20,554 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4., storeName=be94e75499ad728566c6b2d31ebaacd4/B, priority=11, startTime=1732368140132; duration=0sec 2024-11-23T13:22:20,554 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:22:20,554 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: be94e75499ad728566c6b2d31ebaacd4:A 2024-11-23T13:22:20,554 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:22:20,555 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: be94e75499ad728566c6b2d31ebaacd4:B 2024-11-23T13:22:20,555 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-23T13:22:20,556 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 62221 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-23T13:22:20,556 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): be94e75499ad728566c6b2d31ebaacd4/C is initiating minor compaction (all files) 2024-11-23T13:22:20,556 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of be94e75499ad728566c6b2d31ebaacd4/C in TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 
2024-11-23T13:22:20,556 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/b63580f2144b4124a33aa2f5d406a7f3, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/ea31157852e44b03a7263dd83d4412de, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/458ecaf1e6e849739d375e50124f23f9, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/2399ff80b4634830a2f9374e3ad5d4ed, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/8a7c64041b1a4de6b231bc3ae8fd982e] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp, totalSize=60.8 K 2024-11-23T13:22:20,556 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting b63580f2144b4124a33aa2f5d406a7f3, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1732368134885 2024-11-23T13:22:20,556 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting ea31157852e44b03a7263dd83d4412de, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=308, earliestPutTs=1732368135244 2024-11-23T13:22:20,556 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 458ecaf1e6e849739d375e50124f23f9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=322, earliestPutTs=1732368135889 2024-11-23T13:22:20,557 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2399ff80b4634830a2f9374e3ad5d4ed, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=346, earliestPutTs=1732368137048 2024-11-23T13:22:20,557 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8a7c64041b1a4de6b231bc3ae8fd982e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=359, earliestPutTs=1732368138178 2024-11-23T13:22:20,566 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): be94e75499ad728566c6b2d31ebaacd4#C#compaction#314 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:22:20,566 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/7420b066e9ae436882380a640136de63 is 50, key is test_row_0/C:col10/1732368138189/Put/seqid=0 2024-11-23T13:22:20,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742188_1364 (size=13187) 2024-11-23T13:22:20,846 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=383 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/690b02a5692f4a818db88a1f088df98c 2024-11-23T13:22:20,852 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/2951a459f9664d0791c7bad353e377ac is 50, key is test_row_0/B:col10/1732368140437/Put/seqid=0 2024-11-23T13:22:20,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742189_1365 (size=12301) 2024-11-23T13:22:20,974 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/7420b066e9ae436882380a640136de63 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/7420b066e9ae436882380a640136de63 2024-11-23T13:22:20,978 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in be94e75499ad728566c6b2d31ebaacd4/C of be94e75499ad728566c6b2d31ebaacd4 into 7420b066e9ae436882380a640136de63(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T13:22:20,978 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for be94e75499ad728566c6b2d31ebaacd4: 2024-11-23T13:22:20,978 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4., storeName=be94e75499ad728566c6b2d31ebaacd4/C, priority=11, startTime=1732368140132; duration=0sec 2024-11-23T13:22:20,978 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:22:20,978 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: be94e75499ad728566c6b2d31ebaacd4:C 2024-11-23T13:22:21,256 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=383 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/2951a459f9664d0791c7bad353e377ac 2024-11-23T13:22:21,262 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/a3c0029ff58442a5a16eda9c9c3555f7 is 50, key is test_row_0/C:col10/1732368140437/Put/seqid=0 2024-11-23T13:22:21,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742190_1366 (size=12301) 2024-11-23T13:22:21,666 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=383 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/a3c0029ff58442a5a16eda9c9c3555f7 2024-11-23T13:22:21,670 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/690b02a5692f4a818db88a1f088df98c as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/690b02a5692f4a818db88a1f088df98c 2024-11-23T13:22:21,673 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/690b02a5692f4a818db88a1f088df98c, entries=150, sequenceid=383, filesize=12.0 K 2024-11-23T13:22:21,674 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/2951a459f9664d0791c7bad353e377ac as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/2951a459f9664d0791c7bad353e377ac 2024-11-23T13:22:21,676 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/2951a459f9664d0791c7bad353e377ac, entries=150, sequenceid=383, filesize=12.0 K 2024-11-23T13:22:21,677 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/a3c0029ff58442a5a16eda9c9c3555f7 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/a3c0029ff58442a5a16eda9c9c3555f7 2024-11-23T13:22:21,680 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/a3c0029ff58442a5a16eda9c9c3555f7, entries=150, sequenceid=383, filesize=12.0 K 2024-11-23T13:22:21,680 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=6.71 KB/6870 for be94e75499ad728566c6b2d31ebaacd4 in 1242ms, sequenceid=383, compaction requested=false 2024-11-23T13:22:21,680 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for be94e75499ad728566c6b2d31ebaacd4: 2024-11-23T13:22:29,256 DEBUG [Thread-1279 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x64ee0130 to 127.0.0.1:51875 2024-11-23T13:22:29,256 DEBUG [Thread-1279 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:22:29,261 DEBUG [Thread-1283 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x07e55eb7 to 127.0.0.1:51875 2024-11-23T13:22:29,261 DEBUG [Thread-1283 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:22:29,298 DEBUG [Thread-1285 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x03a703d2 to 127.0.0.1:51875 2024-11-23T13:22:29,298 DEBUG [Thread-1285 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:22:29,299 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-23T13:22:29,299 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 245 2024-11-23T13:22:29,299 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 6 2024-11-23T13:22:29,299 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 25 2024-11-23T13:22:29,299 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 11 2024-11-23T13:22:29,299 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 18 2024-11-23T13:22:29,299 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-23T13:22:29,299 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8032 2024-11-23T13:22:29,299 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7944 2024-11-23T13:22:29,299 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7744 2024-11-23T13:22:29,299 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8015 2024-11-23T13:22:29,299 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7939 2024-11-23T13:22:29,299 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-23T13:22:29,299 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-23T13:22:29,299 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x58341641 to 127.0.0.1:51875 2024-11-23T13:22:29,299 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:22:29,299 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-23T13:22:29,300 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-23T13:22:29,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=89, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-23T13:22:29,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-23T13:22:29,302 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732368149302"}]},"ts":"1732368149302"} 2024-11-23T13:22:29,303 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-23T13:22:29,305 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-23T13:22:29,305 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-23T13:22:29,306 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=be94e75499ad728566c6b2d31ebaacd4, UNASSIGN}] 2024-11-23T13:22:29,307 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=be94e75499ad728566c6b2d31ebaacd4, UNASSIGN 2024-11-23T13:22:29,307 INFO [PEWorker-3 {}] 
assignment.RegionStateStore(202): pid=91 updating hbase:meta row=be94e75499ad728566c6b2d31ebaacd4, regionState=CLOSING, regionLocation=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:29,308 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T13:22:29,308 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=92, ppid=91, state=RUNNABLE; CloseRegionProcedure be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317}] 2024-11-23T13:22:29,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-23T13:22:29,459 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:29,460 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] handler.UnassignRegionHandler(124): Close be94e75499ad728566c6b2d31ebaacd4 2024-11-23T13:22:29,460 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-23T13:22:29,460 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1681): Closing be94e75499ad728566c6b2d31ebaacd4, disabling compactions & flushes 2024-11-23T13:22:29,460 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:29,460 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:29,460 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. after waiting 0 ms 2024-11-23T13:22:29,460 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 
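Procedure pid=89 above is the master-side DisableTableProcedure created for the client request ("Client=jenkins ... disable TestAcidGuarantees"); its child procedures then unassign and close region be94e75499ad728566c6b2d31ebaacd4. A minimal sketch of the client side of that call is shown below; the connection configuration and class name are assumptions, everything else mirrors the log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      if (admin.isTableEnabled(table)) {
        // Blocks until the master's DisableTableProcedure finishes, which is why the
        // log shows repeated "Checking to see if procedure is done pid=89" polls.
        admin.disableTable(table);
      }
      // Once disabled, the table can be dropped, which is the usual next teardown step:
      // admin.deleteTable(table);
    }
  }
}
```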
2024-11-23T13:22:29,460 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(2837): Flushing be94e75499ad728566c6b2d31ebaacd4 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-23T13:22:29,460 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=A 2024-11-23T13:22:29,460 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:29,460 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=B 2024-11-23T13:22:29,461 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:29,461 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK be94e75499ad728566c6b2d31ebaacd4, store=C 2024-11-23T13:22:29,461 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:29,464 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/6f8f6e722d3c43bd9c3cc6f27354f003 is 50, key is test_row_0/A:col10/1732368149255/Put/seqid=0 2024-11-23T13:22:29,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742191_1367 (size=9857) 2024-11-23T13:22:29,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-23T13:22:29,719 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
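The cells flushed during this region close all have keys of the form test_row_0/A:col10 (and likewise for families B and C), i.e. one row written across all three column families. Illustratively, a writer producing that pattern with the standard client API would look like the sketch below; the real AcidGuaranteesTestTool writers differ in detail, so the payload and class name here are assumptions.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class WriteOneAcidRow {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      byte[] value = Bytes.toBytes("some-test-value"); // illustrative payload
      Put put = new Put(Bytes.toBytes("test_row_0"));
      // The same value goes to every family; a single Put to one row is applied atomically,
      // which is the property the ACID-guarantees readers later verify.
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), value);
      put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), value);
      put.addColumn(Bytes.toBytes("C"), Bytes.toBytes("col10"), value);
      table.put(put);
    }
  }
}
```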
2024-11-23T13:22:29,868 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=393 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/6f8f6e722d3c43bd9c3cc6f27354f003 2024-11-23T13:22:29,874 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/65e44a6b1327439582536c0050fac98f is 50, key is test_row_0/B:col10/1732368149255/Put/seqid=0 2024-11-23T13:22:29,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742192_1368 (size=9857) 2024-11-23T13:22:29,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-23T13:22:30,278 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=393 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/65e44a6b1327439582536c0050fac98f 2024-11-23T13:22:30,284 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/3b41377c48414e239c2dee98050e2591 is 50, key is test_row_0/C:col10/1732368149255/Put/seqid=0 2024-11-23T13:22:30,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742193_1369 (size=9857) 2024-11-23T13:22:30,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-23T13:22:30,688 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=393 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/3b41377c48414e239c2dee98050e2591 2024-11-23T13:22:30,691 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/A/6f8f6e722d3c43bd9c3cc6f27354f003 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/6f8f6e722d3c43bd9c3cc6f27354f003 2024-11-23T13:22:30,694 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/6f8f6e722d3c43bd9c3cc6f27354f003, entries=100, sequenceid=393, filesize=9.6 K 2024-11-23T13:22:30,695 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/B/65e44a6b1327439582536c0050fac98f as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/65e44a6b1327439582536c0050fac98f 2024-11-23T13:22:30,698 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/65e44a6b1327439582536c0050fac98f, entries=100, sequenceid=393, filesize=9.6 K 2024-11-23T13:22:30,699 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/.tmp/C/3b41377c48414e239c2dee98050e2591 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/3b41377c48414e239c2dee98050e2591 2024-11-23T13:22:30,701 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/3b41377c48414e239c2dee98050e2591, entries=100, sequenceid=393, filesize=9.6 K 2024-11-23T13:22:30,702 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for be94e75499ad728566c6b2d31ebaacd4 in 1242ms, sequenceid=393, compaction requested=true 2024-11-23T13:22:30,703 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/b7ce3c9648e84167987d90f7777f2f76, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/a3a4262ad2664df986b21bc4b5a3379b, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/0c6d26c9f54a4360908ec1a4634e32d8, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/b76edfe017a543e18c0963adc92b4063, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/3218c5e9e44c4c57be11d78d641d4727, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/e06402cbb64944c4a6895974b1c40c95, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/ead6e8e1a3a146f28f904281fd2f2ff8, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/2caec71ba84a46ce97767bd925a708d2, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/f84ed36ef4b647cfa5b2b828d0392019, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/71645ef0676a416c807bfaec80e5a4c4, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/1c7095810b9244f89da94d044b26ff7b, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/e6b8a1559d0b4f2a8705e18e5b6bb105, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/3dfb4e310e764b1289fdd6b3f3b17aaa, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/fd884ff1523c417398fcf0ca5a781cf1, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/d99c51be248941b09be4c4df7a80f287, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/e908d16326b145dba65fb7a7b1f16afc, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/d4f7c81eea9441078a4092d59a5ffaf7, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/9e79602dfc7f435cbf4b82c65f2334a6, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/e921cb0fdaad4f9a898dfaba9a616a7f, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/61ecbceeb1dd4bb48119d117c970bcb7, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/e3650080f80d42fea4f58dd2c854f6b5, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/556e36a3228a4855867d2110efe4ad7f, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/623eb845659c42dba9b99ae959783e70, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/905b865c012e488b9f1a595362dc5bab, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/dc7e210363c9477da8418f9bb8eaebf7, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/a77e414c78d641bfb277ca16dfc0b8b0] to archive 2024-11-23T13:22:30,703 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-23T13:22:30,705 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/b7ce3c9648e84167987d90f7777f2f76 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/b7ce3c9648e84167987d90f7777f2f76 2024-11-23T13:22:30,706 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/a3a4262ad2664df986b21bc4b5a3379b to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/a3a4262ad2664df986b21bc4b5a3379b 2024-11-23T13:22:30,707 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/0c6d26c9f54a4360908ec1a4634e32d8 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/0c6d26c9f54a4360908ec1a4634e32d8 2024-11-23T13:22:30,708 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/b76edfe017a543e18c0963adc92b4063 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/b76edfe017a543e18c0963adc92b4063 2024-11-23T13:22:30,708 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/3218c5e9e44c4c57be11d78d641d4727 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/3218c5e9e44c4c57be11d78d641d4727 2024-11-23T13:22:30,709 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/e06402cbb64944c4a6895974b1c40c95 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/e06402cbb64944c4a6895974b1c40c95 2024-11-23T13:22:30,710 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/ead6e8e1a3a146f28f904281fd2f2ff8 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/ead6e8e1a3a146f28f904281fd2f2ff8 2024-11-23T13:22:30,711 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/2caec71ba84a46ce97767bd925a708d2 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/2caec71ba84a46ce97767bd925a708d2 2024-11-23T13:22:30,712 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/f84ed36ef4b647cfa5b2b828d0392019 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/f84ed36ef4b647cfa5b2b828d0392019 2024-11-23T13:22:30,713 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/71645ef0676a416c807bfaec80e5a4c4 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/71645ef0676a416c807bfaec80e5a4c4 2024-11-23T13:22:30,714 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/1c7095810b9244f89da94d044b26ff7b to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/1c7095810b9244f89da94d044b26ff7b 2024-11-23T13:22:30,714 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/e6b8a1559d0b4f2a8705e18e5b6bb105 to 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/e6b8a1559d0b4f2a8705e18e5b6bb105 2024-11-23T13:22:30,715 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/3dfb4e310e764b1289fdd6b3f3b17aaa to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/3dfb4e310e764b1289fdd6b3f3b17aaa 2024-11-23T13:22:30,716 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/fd884ff1523c417398fcf0ca5a781cf1 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/fd884ff1523c417398fcf0ca5a781cf1 2024-11-23T13:22:30,717 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/d99c51be248941b09be4c4df7a80f287 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/d99c51be248941b09be4c4df7a80f287 2024-11-23T13:22:30,718 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/e908d16326b145dba65fb7a7b1f16afc to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/e908d16326b145dba65fb7a7b1f16afc 2024-11-23T13:22:30,719 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/d4f7c81eea9441078a4092d59a5ffaf7 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/d4f7c81eea9441078a4092d59a5ffaf7 2024-11-23T13:22:30,720 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/9e79602dfc7f435cbf4b82c65f2334a6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/9e79602dfc7f435cbf4b82c65f2334a6 2024-11-23T13:22:30,721 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/e921cb0fdaad4f9a898dfaba9a616a7f to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/e921cb0fdaad4f9a898dfaba9a616a7f 2024-11-23T13:22:30,722 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/61ecbceeb1dd4bb48119d117c970bcb7 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/61ecbceeb1dd4bb48119d117c970bcb7 2024-11-23T13:22:30,723 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/e3650080f80d42fea4f58dd2c854f6b5 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/e3650080f80d42fea4f58dd2c854f6b5 2024-11-23T13:22:30,723 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/556e36a3228a4855867d2110efe4ad7f to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/556e36a3228a4855867d2110efe4ad7f 2024-11-23T13:22:30,724 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/623eb845659c42dba9b99ae959783e70 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/623eb845659c42dba9b99ae959783e70 2024-11-23T13:22:30,725 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/905b865c012e488b9f1a595362dc5bab to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/905b865c012e488b9f1a595362dc5bab 2024-11-23T13:22:30,726 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/dc7e210363c9477da8418f9bb8eaebf7 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/dc7e210363c9477da8418f9bb8eaebf7 2024-11-23T13:22:30,727 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/a77e414c78d641bfb277ca16dfc0b8b0 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/a77e414c78d641bfb277ca16dfc0b8b0 2024-11-23T13:22:30,728 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/7ec8fa4b11f64891996f9c429092f365, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/3656aa6b14a1475793fc8e595b05b49b, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/10dc590f52fa45a1943dfdf27c945412, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/3f88f6e318534d229315caea664160a7, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/6d57b3e763b3462d849a590c25019692, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/4f37750166b5408fb5022486bb641545, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/72a22a63a6ce42ae9fecf123a0954adb, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/a5d8f10c735e4aca82be49fa2f2afddb, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/1016c5772a544bf4b69dc910accbd480, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/02f06ad4ce6e46dd870bc7dc19a8c2b7, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/d78ea92880e3449c96438bbfa5007e9b, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/3f35fa450ffb443b83effe8a81c911a6, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/199d181a04c848e38e00fef04f5994e6, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/36aeadafa94e47c9bb39e24ba10a37db, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/902c456654b2475bad26f186c91dde5a, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/6c70235cdbd143c6baf097c3f85ada14, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/0a044195d545490f8d68c87ba0fb565a, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/fe42c14beddf4730beabdead36c823ea, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/b900df02610b4e708b0046de0f3e7735, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/a204db3c00354527999733eb9410ed79, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/7fe4eb891264455ebd07b3b40a1e0c38, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/e89ab1a840b94ea9927ea15715457e17, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/421dfe5a6894403fa7e14960f7caf554, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/243b8fb2ad1748f19417fb8383fa62cd, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/6e95de6c1b0842c5b2c4f2b2cd9ac1d5, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/b31d11f17c9247ae953405541b27a040] to archive 2024-11-23T13:22:30,729 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
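At this point the store closer has archived the A-family files and has just selected the B-family files to move: each HFile is relocated from .../data/default/TestAcidGuarantees/<region>/<family>/ to the mirrored path under .../archive/. If one wanted to confirm that while the test's mini-DFS (localhost:34115) is still running, a small sketch using the plain Hadoop FileSystem API would suffice; the archive path is copied from the log, the rest is an assumption.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListArchivedStoreFiles {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // The archive location mirrors the data layout; path taken from the log lines above.
    Path archiveDir = new Path("hdfs://localhost:34115/user/jenkins/test-data/"
        + "4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/"
        + "be94e75499ad728566c6b2d31ebaacd4/A");
    FileSystem fs = archiveDir.getFileSystem(conf);
    for (FileStatus status : fs.listStatus(archiveDir)) {
      // Each archived HFile keeps its original name and size.
      System.out.printf("%s\t%d bytes%n", status.getPath().getName(), status.getLen());
    }
  }
}
```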
2024-11-23T13:22:30,730 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/7ec8fa4b11f64891996f9c429092f365 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/7ec8fa4b11f64891996f9c429092f365 2024-11-23T13:22:30,731 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/3656aa6b14a1475793fc8e595b05b49b to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/3656aa6b14a1475793fc8e595b05b49b 2024-11-23T13:22:30,731 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/10dc590f52fa45a1943dfdf27c945412 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/10dc590f52fa45a1943dfdf27c945412 2024-11-23T13:22:30,732 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/3f88f6e318534d229315caea664160a7 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/3f88f6e318534d229315caea664160a7 2024-11-23T13:22:30,733 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/6d57b3e763b3462d849a590c25019692 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/6d57b3e763b3462d849a590c25019692 2024-11-23T13:22:30,734 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/4f37750166b5408fb5022486bb641545 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/4f37750166b5408fb5022486bb641545 2024-11-23T13:22:30,735 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/72a22a63a6ce42ae9fecf123a0954adb to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/72a22a63a6ce42ae9fecf123a0954adb 2024-11-23T13:22:30,736 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/a5d8f10c735e4aca82be49fa2f2afddb to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/a5d8f10c735e4aca82be49fa2f2afddb 2024-11-23T13:22:30,737 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/1016c5772a544bf4b69dc910accbd480 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/1016c5772a544bf4b69dc910accbd480 2024-11-23T13:22:30,738 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/02f06ad4ce6e46dd870bc7dc19a8c2b7 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/02f06ad4ce6e46dd870bc7dc19a8c2b7 2024-11-23T13:22:30,738 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/d78ea92880e3449c96438bbfa5007e9b to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/d78ea92880e3449c96438bbfa5007e9b 2024-11-23T13:22:30,739 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/3f35fa450ffb443b83effe8a81c911a6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/3f35fa450ffb443b83effe8a81c911a6 2024-11-23T13:22:30,740 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/199d181a04c848e38e00fef04f5994e6 to 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/199d181a04c848e38e00fef04f5994e6 2024-11-23T13:22:30,741 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/36aeadafa94e47c9bb39e24ba10a37db to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/36aeadafa94e47c9bb39e24ba10a37db 2024-11-23T13:22:30,742 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/902c456654b2475bad26f186c91dde5a to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/902c456654b2475bad26f186c91dde5a 2024-11-23T13:22:30,743 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/6c70235cdbd143c6baf097c3f85ada14 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/6c70235cdbd143c6baf097c3f85ada14 2024-11-23T13:22:30,744 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/0a044195d545490f8d68c87ba0fb565a to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/0a044195d545490f8d68c87ba0fb565a 2024-11-23T13:22:30,745 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/fe42c14beddf4730beabdead36c823ea to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/fe42c14beddf4730beabdead36c823ea 2024-11-23T13:22:30,745 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/b900df02610b4e708b0046de0f3e7735 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/b900df02610b4e708b0046de0f3e7735 2024-11-23T13:22:30,746 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/a204db3c00354527999733eb9410ed79 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/a204db3c00354527999733eb9410ed79 2024-11-23T13:22:30,747 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/7fe4eb891264455ebd07b3b40a1e0c38 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/7fe4eb891264455ebd07b3b40a1e0c38 2024-11-23T13:22:30,748 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/e89ab1a840b94ea9927ea15715457e17 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/e89ab1a840b94ea9927ea15715457e17 2024-11-23T13:22:30,749 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/421dfe5a6894403fa7e14960f7caf554 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/421dfe5a6894403fa7e14960f7caf554 2024-11-23T13:22:30,749 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/243b8fb2ad1748f19417fb8383fa62cd to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/243b8fb2ad1748f19417fb8383fa62cd 2024-11-23T13:22:30,750 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/6e95de6c1b0842c5b2c4f2b2cd9ac1d5 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/6e95de6c1b0842c5b2c4f2b2cd9ac1d5 2024-11-23T13:22:30,751 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/b31d11f17c9247ae953405541b27a040 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/b31d11f17c9247ae953405541b27a040 2024-11-23T13:22:30,752 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/a1ce11ce29bc4f8db1e3b964f6361e74, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/1f30b48f63094b6da57f39306affb2cb, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/aae4cea7c977492c97c2f01747b53fd8, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/554776b22ac349a8a2ae4152d43581bd, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/f619aeaa001341b09f238131e7049a65, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/80dab130c03e4abab8d3f5c5131efacd, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/ff6ce2a87aa14b8e8e514fb097b6fa80, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/650bb67cf0c3409dbaf423231f08123f, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/e6e7df10a6324aa2bbc20e076375dee1, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/9c859e43158b4f92a1dc942557defb95, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/b533eeaa89e9449093a1eaad85384513, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/bc26ecba3e3c4c45b01c289b8dc5f9fa, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/4767b8f0a6ab4123bcce5303ba5d020c, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/72e8b5f7ee7c496f98524708def9a9ac, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/18a275f78be84c39b77491a3bcd2374a, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/5d571b3d022d40f7bb216a7670e9cee8, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/a008f5367a384d019722274fd996e2f1, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/ae1aa846ca304976acfa903ca92c32dd, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/38d3f5ce36d84205899dfb59d5c67a7d, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/5d490c12c2ca486b89986d5e5c13e502, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/b63580f2144b4124a33aa2f5d406a7f3, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/bc448575177842e38d8c0cd02a2f54a5, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/ea31157852e44b03a7263dd83d4412de, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/458ecaf1e6e849739d375e50124f23f9, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/2399ff80b4634830a2f9374e3ad5d4ed, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/8a7c64041b1a4de6b231bc3ae8fd982e] to archive 2024-11-23T13:22:30,753 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-23T13:22:30,754 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/a1ce11ce29bc4f8db1e3b964f6361e74 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/a1ce11ce29bc4f8db1e3b964f6361e74 2024-11-23T13:22:30,755 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/1f30b48f63094b6da57f39306affb2cb to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/1f30b48f63094b6da57f39306affb2cb 2024-11-23T13:22:30,756 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/aae4cea7c977492c97c2f01747b53fd8 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/aae4cea7c977492c97c2f01747b53fd8 2024-11-23T13:22:30,757 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/554776b22ac349a8a2ae4152d43581bd to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/554776b22ac349a8a2ae4152d43581bd 2024-11-23T13:22:30,757 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/f619aeaa001341b09f238131e7049a65 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/f619aeaa001341b09f238131e7049a65 2024-11-23T13:22:30,758 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/80dab130c03e4abab8d3f5c5131efacd to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/80dab130c03e4abab8d3f5c5131efacd 2024-11-23T13:22:30,759 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/ff6ce2a87aa14b8e8e514fb097b6fa80 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/ff6ce2a87aa14b8e8e514fb097b6fa80 2024-11-23T13:22:30,760 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/650bb67cf0c3409dbaf423231f08123f to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/650bb67cf0c3409dbaf423231f08123f 2024-11-23T13:22:30,761 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/e6e7df10a6324aa2bbc20e076375dee1 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/e6e7df10a6324aa2bbc20e076375dee1 2024-11-23T13:22:30,761 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/9c859e43158b4f92a1dc942557defb95 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/9c859e43158b4f92a1dc942557defb95 2024-11-23T13:22:30,762 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/b533eeaa89e9449093a1eaad85384513 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/b533eeaa89e9449093a1eaad85384513 2024-11-23T13:22:30,763 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/bc26ecba3e3c4c45b01c289b8dc5f9fa to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/bc26ecba3e3c4c45b01c289b8dc5f9fa 2024-11-23T13:22:30,764 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/4767b8f0a6ab4123bcce5303ba5d020c to 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/4767b8f0a6ab4123bcce5303ba5d020c 2024-11-23T13:22:30,765 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/72e8b5f7ee7c496f98524708def9a9ac to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/72e8b5f7ee7c496f98524708def9a9ac 2024-11-23T13:22:30,766 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/18a275f78be84c39b77491a3bcd2374a to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/18a275f78be84c39b77491a3bcd2374a 2024-11-23T13:22:30,767 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/5d571b3d022d40f7bb216a7670e9cee8 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/5d571b3d022d40f7bb216a7670e9cee8 2024-11-23T13:22:30,768 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/a008f5367a384d019722274fd996e2f1 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/a008f5367a384d019722274fd996e2f1 2024-11-23T13:22:30,769 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/ae1aa846ca304976acfa903ca92c32dd to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/ae1aa846ca304976acfa903ca92c32dd 2024-11-23T13:22:30,770 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/38d3f5ce36d84205899dfb59d5c67a7d to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/38d3f5ce36d84205899dfb59d5c67a7d 2024-11-23T13:22:30,770 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/5d490c12c2ca486b89986d5e5c13e502 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/5d490c12c2ca486b89986d5e5c13e502 2024-11-23T13:22:30,771 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/b63580f2144b4124a33aa2f5d406a7f3 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/b63580f2144b4124a33aa2f5d406a7f3 2024-11-23T13:22:30,772 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/bc448575177842e38d8c0cd02a2f54a5 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/bc448575177842e38d8c0cd02a2f54a5 2024-11-23T13:22:30,773 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/ea31157852e44b03a7263dd83d4412de to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/ea31157852e44b03a7263dd83d4412de 2024-11-23T13:22:30,774 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/458ecaf1e6e849739d375e50124f23f9 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/458ecaf1e6e849739d375e50124f23f9 2024-11-23T13:22:30,774 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/2399ff80b4634830a2f9374e3ad5d4ed to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/2399ff80b4634830a2f9374e3ad5d4ed 2024-11-23T13:22:30,775 DEBUG [StoreCloser-TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/8a7c64041b1a4de6b231bc3ae8fd982e to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/8a7c64041b1a4de6b231bc3ae8fd982e 2024-11-23T13:22:30,779 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/recovered.edits/396.seqid, newMaxSeqId=396, maxSeqId=1 2024-11-23T13:22:30,780 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4. 2024-11-23T13:22:30,780 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1635): Region close journal for be94e75499ad728566c6b2d31ebaacd4: 2024-11-23T13:22:30,781 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] handler.UnassignRegionHandler(170): Closed be94e75499ad728566c6b2d31ebaacd4 2024-11-23T13:22:30,782 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=91 updating hbase:meta row=be94e75499ad728566c6b2d31ebaacd4, regionState=CLOSED 2024-11-23T13:22:30,783 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=92, resume processing ppid=91 2024-11-23T13:22:30,783 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, ppid=91, state=SUCCESS; CloseRegionProcedure be94e75499ad728566c6b2d31ebaacd4, server=ba2e440802a7,33173,1732368061317 in 1.4740 sec 2024-11-23T13:22:30,784 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=91, resume processing ppid=90 2024-11-23T13:22:30,784 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, ppid=90, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=be94e75499ad728566c6b2d31ebaacd4, UNASSIGN in 1.4770 sec 2024-11-23T13:22:30,786 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=90, resume processing ppid=89 2024-11-23T13:22:30,786 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, ppid=89, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.4800 sec 2024-11-23T13:22:30,787 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732368150787"}]},"ts":"1732368150787"} 2024-11-23T13:22:30,787 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-23T13:22:30,789 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-23T13:22:30,790 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.4900 sec 2024-11-23T13:22:31,075 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-23T13:22:31,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 
{}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-23T13:22:31,405 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 89 completed 2024-11-23T13:22:31,406 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-23T13:22:31,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=93, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T13:22:31,407 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=93, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T13:22:31,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-23T13:22:31,408 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=93, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T13:22:31,409 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4 2024-11-23T13:22:31,411 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A, FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B, FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C, FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/recovered.edits] 2024-11-23T13:22:31,413 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/690b02a5692f4a818db88a1f088df98c to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/690b02a5692f4a818db88a1f088df98c 2024-11-23T13:22:31,414 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/6f8f6e722d3c43bd9c3cc6f27354f003 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/6f8f6e722d3c43bd9c3cc6f27354f003 2024-11-23T13:22:31,415 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/c9553f5b9bfc46a8b65fa1217163bb1a to 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/A/c9553f5b9bfc46a8b65fa1217163bb1a 2024-11-23T13:22:31,417 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/2951a459f9664d0791c7bad353e377ac to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/2951a459f9664d0791c7bad353e377ac 2024-11-23T13:22:31,417 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/3dede5b9e85945f6921ceba53caede0a to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/3dede5b9e85945f6921ceba53caede0a 2024-11-23T13:22:31,418 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/65e44a6b1327439582536c0050fac98f to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/B/65e44a6b1327439582536c0050fac98f 2024-11-23T13:22:31,420 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/3b41377c48414e239c2dee98050e2591 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/3b41377c48414e239c2dee98050e2591 2024-11-23T13:22:31,421 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/7420b066e9ae436882380a640136de63 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/7420b066e9ae436882380a640136de63 2024-11-23T13:22:31,422 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/a3c0029ff58442a5a16eda9c9c3555f7 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/C/a3c0029ff58442a5a16eda9c9c3555f7 2024-11-23T13:22:31,424 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/recovered.edits/396.seqid to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4/recovered.edits/396.seqid 
2024-11-23T13:22:31,424 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/be94e75499ad728566c6b2d31ebaacd4 2024-11-23T13:22:31,424 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-23T13:22:31,426 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=93, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T13:22:31,431 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-23T13:22:31,432 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-23T13:22:31,433 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=93, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T13:22:31,433 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-23T13:22:31,433 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732368151433"}]},"ts":"9223372036854775807"} 2024-11-23T13:22:31,435 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-23T13:22:31,435 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => be94e75499ad728566c6b2d31ebaacd4, NAME => 'TestAcidGuarantees,,1732368117701.be94e75499ad728566c6b2d31ebaacd4.', STARTKEY => '', ENDKEY => ''}] 2024-11-23T13:22:31,435 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
2024-11-23T13:22:31,435 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732368151435"}]},"ts":"9223372036854775807"} 2024-11-23T13:22:31,436 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-23T13:22:31,438 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=93, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T13:22:31,439 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 32 msec 2024-11-23T13:22:31,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-23T13:22:31,509 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 93 completed 2024-11-23T13:22:31,518 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testGetAtomicity Thread=237 (was 247), OpenFileDescriptor=447 (was 462), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=327 (was 351), ProcessCount=11 (was 11), AvailableMemoryMB=3709 (was 3750) 2024-11-23T13:22:31,526 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMobScanAtomicity Thread=237, OpenFileDescriptor=447, MaxFileDescriptor=1048576, SystemLoadAverage=327, ProcessCount=11, AvailableMemoryMB=3709 2024-11-23T13:22:31,527 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-11-23T13:22:31,528 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T13:22:31,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=94, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-23T13:22:31,529 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T13:22:31,529 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:31,529 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 94 2024-11-23T13:22:31,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-11-23T13:22:31,530 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T13:22:31,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742194_1370 (size=960) 2024-11-23T13:22:31,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-11-23T13:22:31,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-11-23T13:22:31,937 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7 2024-11-23T13:22:31,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742195_1371 (size=53) 2024-11-23T13:22:32,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-11-23T13:22:32,342 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T13:22:32,343 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 2d79dbed6dcd0da8c65ba796b67996f6, disabling compactions & flushes 2024-11-23T13:22:32,343 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:32,343 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:32,343 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. after waiting 0 ms 2024-11-23T13:22:32,343 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:32,343 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 
2024-11-23T13:22:32,343 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 2d79dbed6dcd0da8c65ba796b67996f6: 2024-11-23T13:22:32,344 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T13:22:32,344 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732368152344"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732368152344"}]},"ts":"1732368152344"} 2024-11-23T13:22:32,345 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-23T13:22:32,345 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T13:22:32,345 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732368152345"}]},"ts":"1732368152345"} 2024-11-23T13:22:32,346 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-23T13:22:32,350 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=2d79dbed6dcd0da8c65ba796b67996f6, ASSIGN}] 2024-11-23T13:22:32,350 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=2d79dbed6dcd0da8c65ba796b67996f6, ASSIGN 2024-11-23T13:22:32,351 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=2d79dbed6dcd0da8c65ba796b67996f6, ASSIGN; state=OFFLINE, location=ba2e440802a7,33173,1732368061317; forceNewPlan=false, retain=false 2024-11-23T13:22:32,501 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=95 updating hbase:meta row=2d79dbed6dcd0da8c65ba796b67996f6, regionState=OPENING, regionLocation=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:32,502 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=96, ppid=95, state=RUNNABLE; OpenRegionProcedure 2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317}] 2024-11-23T13:22:32,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-11-23T13:22:32,653 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:32,655 INFO [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 
2024-11-23T13:22:32,655 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(7285): Opening region: {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} 2024-11-23T13:22:32,656 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:32,656 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T13:22:32,656 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(7327): checking encryption for 2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:32,656 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(7330): checking classloading for 2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:32,657 INFO [StoreOpener-2d79dbed6dcd0da8c65ba796b67996f6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:32,658 INFO [StoreOpener-2d79dbed6dcd0da8c65ba796b67996f6-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-23T13:22:32,658 INFO [StoreOpener-2d79dbed6dcd0da8c65ba796b67996f6-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2d79dbed6dcd0da8c65ba796b67996f6 columnFamilyName A 2024-11-23T13:22:32,658 DEBUG [StoreOpener-2d79dbed6dcd0da8c65ba796b67996f6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:32,659 INFO [StoreOpener-2d79dbed6dcd0da8c65ba796b67996f6-1 {}] regionserver.HStore(327): Store=2d79dbed6dcd0da8c65ba796b67996f6/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T13:22:32,659 INFO [StoreOpener-2d79dbed6dcd0da8c65ba796b67996f6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:32,659 INFO [StoreOpener-2d79dbed6dcd0da8c65ba796b67996f6-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-23T13:22:32,660 INFO [StoreOpener-2d79dbed6dcd0da8c65ba796b67996f6-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2d79dbed6dcd0da8c65ba796b67996f6 columnFamilyName B 2024-11-23T13:22:32,660 DEBUG [StoreOpener-2d79dbed6dcd0da8c65ba796b67996f6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:32,660 INFO [StoreOpener-2d79dbed6dcd0da8c65ba796b67996f6-1 {}] regionserver.HStore(327): Store=2d79dbed6dcd0da8c65ba796b67996f6/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T13:22:32,660 INFO [StoreOpener-2d79dbed6dcd0da8c65ba796b67996f6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:32,661 INFO [StoreOpener-2d79dbed6dcd0da8c65ba796b67996f6-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-23T13:22:32,661 INFO [StoreOpener-2d79dbed6dcd0da8c65ba796b67996f6-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2d79dbed6dcd0da8c65ba796b67996f6 columnFamilyName C 2024-11-23T13:22:32,661 DEBUG [StoreOpener-2d79dbed6dcd0da8c65ba796b67996f6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:32,661 INFO [StoreOpener-2d79dbed6dcd0da8c65ba796b67996f6-1 {}] regionserver.HStore(327): Store=2d79dbed6dcd0da8c65ba796b67996f6/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T13:22:32,661 INFO [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:32,662 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:32,662 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:32,663 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-23T13:22:32,664 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(1085): writing seq id for 2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:32,665 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T13:22:32,666 INFO [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(1102): Opened 2d79dbed6dcd0da8c65ba796b67996f6; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72796658, jitterRate=0.08475473523139954}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T13:22:32,666 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(1001): Region open journal for 2d79dbed6dcd0da8c65ba796b67996f6: 2024-11-23T13:22:32,667 INFO [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6., pid=96, masterSystemTime=1732368152653 2024-11-23T13:22:32,668 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:32,668 INFO [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 
2024-11-23T13:22:32,668 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=95 updating hbase:meta row=2d79dbed6dcd0da8c65ba796b67996f6, regionState=OPEN, openSeqNum=2, regionLocation=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:32,670 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=96, resume processing ppid=95 2024-11-23T13:22:32,670 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, ppid=95, state=SUCCESS; OpenRegionProcedure 2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 in 167 msec 2024-11-23T13:22:32,672 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=95, resume processing ppid=94 2024-11-23T13:22:32,672 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=95, ppid=94, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=2d79dbed6dcd0da8c65ba796b67996f6, ASSIGN in 320 msec 2024-11-23T13:22:32,672 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T13:22:32,672 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732368152672"}]},"ts":"1732368152672"} 2024-11-23T13:22:32,673 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-23T13:22:32,675 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T13:22:32,676 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=94, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1470 sec 2024-11-23T13:22:33,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-11-23T13:22:33,634 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 94 completed 2024-11-23T13:22:33,635 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1df61dc9 to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5fe71801 2024-11-23T13:22:33,639 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@bf5e2f0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:22:33,640 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:22:33,641 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41156, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:22:33,642 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-23T13:22:33,643 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49926, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-23T13:22:33,644 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-23T13:22:33,644 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T13:22:33,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=97, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-23T13:22:33,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742196_1372 (size=996) 2024-11-23T13:22:34,055 DEBUG [PEWorker-4 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.960 2024-11-23T13:22:34,055 INFO [PEWorker-4 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.996 2024-11-23T13:22:34,057 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=98, ppid=97, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-23T13:22:34,058 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=99, ppid=98, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=2d79dbed6dcd0da8c65ba796b67996f6, REOPEN/MOVE}] 2024-11-23T13:22:34,059 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=99, ppid=98, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=2d79dbed6dcd0da8c65ba796b67996f6, REOPEN/MOVE 2024-11-23T13:22:34,059 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=99 updating hbase:meta row=2d79dbed6dcd0da8c65ba796b67996f6, regionState=CLOSING, regionLocation=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:34,060 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T13:22:34,060 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=100, ppid=99, state=RUNNABLE; CloseRegionProcedure 2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317}] 2024-11-23T13:22:34,211 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:34,212 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] handler.UnassignRegionHandler(124): Close 2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:34,212 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-23T13:22:34,212 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1681): Closing 2d79dbed6dcd0da8c65ba796b67996f6, disabling compactions & flushes 2024-11-23T13:22:34,212 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:34,212 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:34,212 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. after waiting 0 ms 2024-11-23T13:22:34,212 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 
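[Editor's note] The ModifyTableProcedure above (pid=97) rewrites the TestAcidGuarantees descriptor so that column family 'A' becomes a MOB family (IS_MOB => 'true', MOB_THRESHOLD => '4'); that change is what drives the ReopenTableRegionsProcedure and the close of region 2d79dbed6dcd0da8c65ba796b67996f6 traced in the surrounding entries. A minimal client-side sketch of an equivalent alteration, assuming the standard HBase 2.x Admin and ColumnFamilyDescriptorBuilder API (the test drives this through its own utilities, not necessarily this exact call sequence):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class EnableMobOnFamilyA {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      // Rebuild family 'A' as a MOB family with a 4-byte threshold, matching the
      // "IS_MOB => 'true', MOB_THRESHOLD => '4'" attributes in the modify-table request above.
      ColumnFamilyDescriptor mobA = ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes("A"))
          .setMaxVersions(1)
          .setMobEnabled(true)
          .setMobThreshold(4L)
          .build();
      // modifyColumnFamily replaces the existing descriptor for 'A' and, like the
      // ModifyTableProcedure in this log, triggers a reopen of the table's regions.
      admin.modifyColumnFamily(table, mobA);
    }
  }
}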
2024-11-23T13:22:34,216 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-23T13:22:34,216 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:34,216 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1635): Region close journal for 2d79dbed6dcd0da8c65ba796b67996f6: 2024-11-23T13:22:34,216 WARN [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegionServer(3786): Not adding moved region record: 2d79dbed6dcd0da8c65ba796b67996f6 to self. 2024-11-23T13:22:34,217 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] handler.UnassignRegionHandler(170): Closed 2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:34,218 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=99 updating hbase:meta row=2d79dbed6dcd0da8c65ba796b67996f6, regionState=CLOSED 2024-11-23T13:22:34,220 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=100, resume processing ppid=99 2024-11-23T13:22:34,220 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, ppid=99, state=SUCCESS; CloseRegionProcedure 2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 in 159 msec 2024-11-23T13:22:34,220 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=99, ppid=98, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=2d79dbed6dcd0da8c65ba796b67996f6, REOPEN/MOVE; state=CLOSED, location=ba2e440802a7,33173,1732368061317; forceNewPlan=false, retain=true 2024-11-23T13:22:34,371 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=99 updating hbase:meta row=2d79dbed6dcd0da8c65ba796b67996f6, regionState=OPENING, regionLocation=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:34,372 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=101, ppid=99, state=RUNNABLE; OpenRegionProcedure 2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317}] 2024-11-23T13:22:34,523 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:34,526 INFO [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 
2024-11-23T13:22:34,526 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(7285): Opening region: {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} 2024-11-23T13:22:34,526 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:34,526 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T13:22:34,526 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(7327): checking encryption for 2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:34,526 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(7330): checking classloading for 2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:34,527 INFO [StoreOpener-2d79dbed6dcd0da8c65ba796b67996f6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:34,528 INFO [StoreOpener-2d79dbed6dcd0da8c65ba796b67996f6-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-23T13:22:34,528 INFO [StoreOpener-2d79dbed6dcd0da8c65ba796b67996f6-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2d79dbed6dcd0da8c65ba796b67996f6 columnFamilyName A 2024-11-23T13:22:34,529 DEBUG [StoreOpener-2d79dbed6dcd0da8c65ba796b67996f6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:34,530 INFO [StoreOpener-2d79dbed6dcd0da8c65ba796b67996f6-1 {}] regionserver.HStore(327): Store=2d79dbed6dcd0da8c65ba796b67996f6/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T13:22:34,530 INFO [StoreOpener-2d79dbed6dcd0da8c65ba796b67996f6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:34,531 INFO [StoreOpener-2d79dbed6dcd0da8c65ba796b67996f6-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-23T13:22:34,531 INFO [StoreOpener-2d79dbed6dcd0da8c65ba796b67996f6-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2d79dbed6dcd0da8c65ba796b67996f6 columnFamilyName B 2024-11-23T13:22:34,531 DEBUG [StoreOpener-2d79dbed6dcd0da8c65ba796b67996f6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:34,531 INFO [StoreOpener-2d79dbed6dcd0da8c65ba796b67996f6-1 {}] regionserver.HStore(327): Store=2d79dbed6dcd0da8c65ba796b67996f6/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T13:22:34,531 INFO [StoreOpener-2d79dbed6dcd0da8c65ba796b67996f6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:34,532 INFO [StoreOpener-2d79dbed6dcd0da8c65ba796b67996f6-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-23T13:22:34,532 INFO [StoreOpener-2d79dbed6dcd0da8c65ba796b67996f6-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2d79dbed6dcd0da8c65ba796b67996f6 columnFamilyName C 2024-11-23T13:22:34,532 DEBUG [StoreOpener-2d79dbed6dcd0da8c65ba796b67996f6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:34,532 INFO [StoreOpener-2d79dbed6dcd0da8c65ba796b67996f6-1 {}] regionserver.HStore(327): Store=2d79dbed6dcd0da8c65ba796b67996f6/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T13:22:34,532 INFO [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:34,533 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:34,533 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:34,534 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-23T13:22:34,535 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(1085): writing seq id for 2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:34,536 INFO [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(1102): Opened 2d79dbed6dcd0da8c65ba796b67996f6; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59922850, jitterRate=-0.10707995295524597}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T13:22:34,537 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(1001): Region open journal for 2d79dbed6dcd0da8c65ba796b67996f6: 2024-11-23T13:22:34,537 INFO [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6., pid=101, masterSystemTime=1732368154523 2024-11-23T13:22:34,539 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:34,539 INFO [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 
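[Editor's note] The reopened region now carries the modified descriptor, including the very small MEMSTORE_FLUSHSIZE (131072 bytes) that TableDescriptorChecker warned about above. The RegionTooBusyException entries further down ("Over memstore limit=512.0 K") are consistent with the usual blocking threshold of flush size times hbase.hregion.memstore.block.multiplier, whose stock default is 4. A hypothetical back-of-the-envelope check, not part of the test itself; the 4x multiplier is assumed rather than read from this run's configuration:

public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    long memstoreFlushSize = 131072L; // MEMSTORE_FLUSHSIZE from the modified table descriptor
    long blockMultiplier = 4L;        // assumed default hbase.hregion.memstore.block.multiplier
    long blockingLimit = memstoreFlushSize * blockMultiplier;
    // 131072 * 4 = 524288 bytes = 512.0 K, matching "Over memstore limit=512.0 K" below.
    System.out.println(blockingLimit + " bytes = " + (blockingLimit / 1024.0) + " K");
  }
}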
2024-11-23T13:22:34,539 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=99 updating hbase:meta row=2d79dbed6dcd0da8c65ba796b67996f6, regionState=OPEN, openSeqNum=5, regionLocation=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:34,541 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=101, resume processing ppid=99 2024-11-23T13:22:34,541 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, ppid=99, state=SUCCESS; OpenRegionProcedure 2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 in 168 msec 2024-11-23T13:22:34,542 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=99, resume processing ppid=98 2024-11-23T13:22:34,542 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, ppid=98, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=2d79dbed6dcd0da8c65ba796b67996f6, REOPEN/MOVE in 483 msec 2024-11-23T13:22:34,543 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=98, resume processing ppid=97 2024-11-23T13:22:34,543 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, ppid=97, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 485 msec 2024-11-23T13:22:34,544 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 899 msec 2024-11-23T13:22:34,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=97 2024-11-23T13:22:34,546 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3637e4c6 to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@51f7d511 2024-11-23T13:22:34,552 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@75b14fbd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:22:34,553 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x72f422b4 to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1dc42ea6 2024-11-23T13:22:34,555 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@62f74604, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:22:34,556 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2df33cdf to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@117e86d9 2024-11-23T13:22:34,559 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49e13594, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:22:34,559 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x09f472e0 to 
127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6cd96549 2024-11-23T13:22:34,562 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c54a0d3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:22:34,563 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x167a78b0 to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@31aea41b 2024-11-23T13:22:34,566 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3875c8c5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:22:34,566 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1e247aa1 to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@801ba40 2024-11-23T13:22:34,569 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@319559be, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:22:34,570 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2205f666 to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@27539bdc 2024-11-23T13:22:34,573 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c907e21, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:22:34,573 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6584e9ce to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5e3203d9 2024-11-23T13:22:34,576 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@61ec0f48, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:22:34,577 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x37ec8e3b to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@798e7fd4 2024-11-23T13:22:34,580 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7819b9e2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:22:34,581 DEBUG [Time-limited test 
{}] zookeeper.ReadOnlyZKClient(149): Connect 0x787e5169 to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7284f16d 2024-11-23T13:22:34,584 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47679076, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:22:34,588 DEBUG [hconnection-0x74637d2d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:22:34,588 DEBUG [hconnection-0x12bffb0-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:22:34,588 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T13:22:34,589 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41172, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:22:34,589 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41158, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:22:34,589 DEBUG [hconnection-0x6b2237d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:22:34,589 DEBUG [hconnection-0x1ec0c5d2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:22:34,589 DEBUG [hconnection-0x66ed6089-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:22:34,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=102, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees 2024-11-23T13:22:34,589 DEBUG [hconnection-0x73d6925-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:22:34,590 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41190, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:22:34,590 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41206, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:22:34,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-23T13:22:34,590 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41186, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:22:34,590 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41216, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:22:34,591 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=102, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T13:22:34,591 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=102, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T13:22:34,592 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=103, ppid=102, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T13:22:34,595 DEBUG [hconnection-0x4e45fa7c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:22:34,595 DEBUG [hconnection-0x8d58b04-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:22:34,596 DEBUG [hconnection-0x5d239b0-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:22:34,596 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41224, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:22:34,596 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41228, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:22:34,597 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41240, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:22:34,597 DEBUG [hconnection-0x59f3b5c3-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:22:34,598 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41254, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:22:34,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:34,600 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2d79dbed6dcd0da8c65ba796b67996f6 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-23T13:22:34,601 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d79dbed6dcd0da8c65ba796b67996f6, store=A 2024-11-23T13:22:34,601 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:34,601 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d79dbed6dcd0da8c65ba796b67996f6, store=B 2024-11-23T13:22:34,601 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:34,601 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d79dbed6dcd0da8c65ba796b67996f6, store=C 2024-11-23T13:22:34,601 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:34,624 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due 
to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:34,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41254 deadline: 1732368214623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:34,625 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:34,625 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:34,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732368214624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:34,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41186 deadline: 1732368214623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:34,625 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:34,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41224 deadline: 1732368214625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:34,625 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:34,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41190 deadline: 1732368214625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:34,639 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112393c31432cc324b19aaa3c46cff0193ce_2d79dbed6dcd0da8c65ba796b67996f6 is 50, key is test_row_0/A:col10/1732368154599/Put/seqid=0 2024-11-23T13:22:34,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742197_1373 (size=12154) 2024-11-23T13:22:34,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-23T13:22:34,729 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:34,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732368214726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:34,730 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:34,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41254 deadline: 1732368214726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:34,730 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:34,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41186 deadline: 1732368214726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:34,730 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:34,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41224 deadline: 1732368214727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:34,730 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:34,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41190 deadline: 1732368214727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:34,743 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:34,744 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-23T13:22:34,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:34,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. as already flushing 2024-11-23T13:22:34,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:34,744 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:34,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:34,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:34,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-23T13:22:34,896 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:34,897 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-23T13:22:34,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:34,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. as already flushing 2024-11-23T13:22:34,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:34,897 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:34,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:34,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:34,933 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:34,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732368214930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:34,933 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:34,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41224 deadline: 1732368214931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:34,934 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:34,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41190 deadline: 1732368214932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:34,934 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:34,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41186 deadline: 1732368214933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:34,934 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:34,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41254 deadline: 1732368214933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:35,049 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:35,049 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:35,050 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-23T13:22:35,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:35,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. as already flushing 2024-11-23T13:22:35,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:35,050 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:35,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:35,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:35,053 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112393c31432cc324b19aaa3c46cff0193ce_2d79dbed6dcd0da8c65ba796b67996f6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112393c31432cc324b19aaa3c46cff0193ce_2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:35,054 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/f0cca3de620446ff8a29962a05faf22e, store: [table=TestAcidGuarantees family=A region=2d79dbed6dcd0da8c65ba796b67996f6] 2024-11-23T13:22:35,055 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/f0cca3de620446ff8a29962a05faf22e is 175, key is test_row_0/A:col10/1732368154599/Put/seqid=0 2024-11-23T13:22:35,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742198_1374 (size=30955) 2024-11-23T13:22:35,060 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=17, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/f0cca3de620446ff8a29962a05faf22e 2024-11-23T13:22:35,081 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the 
biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/545c12a9da4e43b48528a109bf514a4c is 50, key is test_row_0/B:col10/1732368154599/Put/seqid=0 2024-11-23T13:22:35,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742199_1375 (size=12001) 2024-11-23T13:22:35,086 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/545c12a9da4e43b48528a109bf514a4c 2024-11-23T13:22:35,113 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/01cdf1b1d8b14eef87fc2d905754ea0f is 50, key is test_row_0/C:col10/1732368154599/Put/seqid=0 2024-11-23T13:22:35,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742200_1376 (size=12001) 2024-11-23T13:22:35,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-23T13:22:35,202 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:35,203 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-23T13:22:35,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:35,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. as already flushing 2024-11-23T13:22:35,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:35,203 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:35,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:35,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:35,239 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:35,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732368215235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:35,239 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:35,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41190 deadline: 1732368215236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:35,240 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:35,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41224 deadline: 1732368215236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:35,240 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:35,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41186 deadline: 1732368215237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:35,240 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:35,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41254 deadline: 1732368215237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:35,355 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:35,355 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-23T13:22:35,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:35,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. as already flushing 2024-11-23T13:22:35,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:35,356 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:22:35,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:35,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:35,508 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:35,508 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-23T13:22:35,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:35,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. as already flushing 2024-11-23T13:22:35,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:35,508 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:35,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:35,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:22:35,530 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/01cdf1b1d8b14eef87fc2d905754ea0f 2024-11-23T13:22:35,534 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/f0cca3de620446ff8a29962a05faf22e as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/f0cca3de620446ff8a29962a05faf22e 2024-11-23T13:22:35,538 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/f0cca3de620446ff8a29962a05faf22e, entries=150, sequenceid=17, filesize=30.2 K 2024-11-23T13:22:35,539 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/545c12a9da4e43b48528a109bf514a4c as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/545c12a9da4e43b48528a109bf514a4c 2024-11-23T13:22:35,542 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/545c12a9da4e43b48528a109bf514a4c, entries=150, sequenceid=17, filesize=11.7 K 2024-11-23T13:22:35,543 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/01cdf1b1d8b14eef87fc2d905754ea0f as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/01cdf1b1d8b14eef87fc2d905754ea0f 2024-11-23T13:22:35,546 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/01cdf1b1d8b14eef87fc2d905754ea0f, entries=150, sequenceid=17, filesize=11.7 K 2024-11-23T13:22:35,547 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 2d79dbed6dcd0da8c65ba796b67996f6 in 947ms, sequenceid=17, compaction requested=false 2024-11-23T13:22:35,547 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2d79dbed6dcd0da8c65ba796b67996f6: 2024-11-23T13:22:35,660 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:35,661 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=103 2024-11-23T13:22:35,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:35,661 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2837): Flushing 2d79dbed6dcd0da8c65ba796b67996f6 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-23T13:22:35,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d79dbed6dcd0da8c65ba796b67996f6, store=A 2024-11-23T13:22:35,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:35,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d79dbed6dcd0da8c65ba796b67996f6, store=B 2024-11-23T13:22:35,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:35,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d79dbed6dcd0da8c65ba796b67996f6, store=C 2024-11-23T13:22:35,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:35,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123e0a515631d7b4288960d9d9c33c9c757_2d79dbed6dcd0da8c65ba796b67996f6 is 50, key is test_row_0/A:col10/1732368154624/Put/seqid=0 2024-11-23T13:22:35,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742201_1377 (size=12154) 2024-11-23T13:22:35,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-23T13:22:35,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:35,745 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. as already flushing 2024-11-23T13:22:35,755 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:35,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732368215749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:35,755 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:35,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41190 deadline: 1732368215750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:35,756 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:35,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41254 deadline: 1732368215752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:35,756 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:35,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41186 deadline: 1732368215753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:35,757 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:35,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41224 deadline: 1732368215753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:35,859 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:35,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732368215856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:35,859 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:35,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41190 deadline: 1732368215856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:35,861 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:35,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41254 deadline: 1732368215857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:35,861 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:35,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41186 deadline: 1732368215857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:35,861 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:35,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41224 deadline: 1732368215858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:36,063 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:36,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41190 deadline: 1732368216060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:36,063 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:36,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732368216061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:36,063 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:36,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41186 deadline: 1732368216062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:36,066 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:36,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41254 deadline: 1732368216063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:36,066 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:36,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41224 deadline: 1732368216063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:36,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:36,077 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123e0a515631d7b4288960d9d9c33c9c757_2d79dbed6dcd0da8c65ba796b67996f6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123e0a515631d7b4288960d9d9c33c9c757_2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:36,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/4410be1b40c94d3eb8d9cfd96371a2b1, store: [table=TestAcidGuarantees family=A region=2d79dbed6dcd0da8c65ba796b67996f6] 2024-11-23T13:22:36,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/4410be1b40c94d3eb8d9cfd96371a2b1 is 175, key is test_row_0/A:col10/1732368154624/Put/seqid=0 2024-11-23T13:22:36,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742202_1378 (size=30955) 2024-11-23T13:22:36,312 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-23T13:22:36,367 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:36,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732368216365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:36,367 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:36,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41190 deadline: 1732368216366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:36,367 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:36,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41186 deadline: 1732368216366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:36,369 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:36,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41254 deadline: 1732368216367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:36,369 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:36,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41224 deadline: 1732368216368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:36,486 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=41, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/4410be1b40c94d3eb8d9cfd96371a2b1 2024-11-23T13:22:36,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/dadc375535324e7f888f41fabeafc4d5 is 50, key is test_row_0/B:col10/1732368154624/Put/seqid=0 2024-11-23T13:22:36,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742203_1379 (size=12001) 2024-11-23T13:22:36,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-23T13:22:36,873 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:36,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732368216871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:36,874 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:36,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41186 deadline: 1732368216872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:36,874 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:36,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41190 deadline: 1732368216872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:36,874 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:36,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41224 deadline: 1732368216872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:36,876 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:36,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41254 deadline: 1732368216873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:36,899 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/dadc375535324e7f888f41fabeafc4d5 2024-11-23T13:22:36,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/5a8ad1fa81d741bb83e2c9e223a4b766 is 50, key is test_row_0/C:col10/1732368154624/Put/seqid=0 2024-11-23T13:22:36,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742204_1380 (size=12001) 2024-11-23T13:22:36,912 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/5a8ad1fa81d741bb83e2c9e223a4b766 2024-11-23T13:22:36,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/4410be1b40c94d3eb8d9cfd96371a2b1 as 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/4410be1b40c94d3eb8d9cfd96371a2b1 2024-11-23T13:22:36,920 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/4410be1b40c94d3eb8d9cfd96371a2b1, entries=150, sequenceid=41, filesize=30.2 K 2024-11-23T13:22:36,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/dadc375535324e7f888f41fabeafc4d5 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/dadc375535324e7f888f41fabeafc4d5 2024-11-23T13:22:36,925 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/dadc375535324e7f888f41fabeafc4d5, entries=150, sequenceid=41, filesize=11.7 K 2024-11-23T13:22:36,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/5a8ad1fa81d741bb83e2c9e223a4b766 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/5a8ad1fa81d741bb83e2c9e223a4b766 2024-11-23T13:22:36,929 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/5a8ad1fa81d741bb83e2c9e223a4b766, entries=150, sequenceid=41, filesize=11.7 K 2024-11-23T13:22:36,930 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 2d79dbed6dcd0da8c65ba796b67996f6 in 1269ms, sequenceid=41, compaction requested=false 2024-11-23T13:22:36,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2538): Flush status journal for 2d79dbed6dcd0da8c65ba796b67996f6: 2024-11-23T13:22:36,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 
2024-11-23T13:22:36,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=103 2024-11-23T13:22:36,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4106): Remote procedure done, pid=103 2024-11-23T13:22:36,934 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=103, resume processing ppid=102 2024-11-23T13:22:36,934 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, ppid=102, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3400 sec 2024-11-23T13:22:36,935 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees in 2.3460 sec 2024-11-23T13:22:37,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:37,880 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2d79dbed6dcd0da8c65ba796b67996f6 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-23T13:22:37,881 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d79dbed6dcd0da8c65ba796b67996f6, store=A 2024-11-23T13:22:37,881 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:37,881 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d79dbed6dcd0da8c65ba796b67996f6, store=B 2024-11-23T13:22:37,881 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:37,881 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d79dbed6dcd0da8c65ba796b67996f6, store=C 2024-11-23T13:22:37,881 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:37,888 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411235b7127de50e245a792fcb9a14d9861b1_2d79dbed6dcd0da8c65ba796b67996f6 is 50, key is test_row_0/A:col10/1732368155752/Put/seqid=0 2024-11-23T13:22:37,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742205_1381 (size=12154) 2024-11-23T13:22:37,914 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:37,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41224 deadline: 1732368217905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:37,917 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:37,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41186 deadline: 1732368217910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:37,918 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:37,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41254 deadline: 1732368217911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:37,919 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:37,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732368217914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:37,919 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:37,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41190 deadline: 1732368217915, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:38,019 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:38,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41224 deadline: 1732368218015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:38,022 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:38,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41186 deadline: 1732368218018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:38,022 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:38,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41254 deadline: 1732368218018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:38,024 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:38,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732368218020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:38,025 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:38,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41190 deadline: 1732368218022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:38,223 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:38,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41224 deadline: 1732368218220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:38,226 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:38,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41186 deadline: 1732368218223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:38,227 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:38,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41254 deadline: 1732368218223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:38,229 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:38,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732368218225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:38,229 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:38,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41190 deadline: 1732368218226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:38,311 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:38,315 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411235b7127de50e245a792fcb9a14d9861b1_2d79dbed6dcd0da8c65ba796b67996f6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411235b7127de50e245a792fcb9a14d9861b1_2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:38,316 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/5a68c595ceb04b4f8f5f219a3db4e846, store: [table=TestAcidGuarantees family=A region=2d79dbed6dcd0da8c65ba796b67996f6] 2024-11-23T13:22:38,316 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/5a68c595ceb04b4f8f5f219a3db4e846 is 175, key is test_row_0/A:col10/1732368155752/Put/seqid=0 2024-11-23T13:22:38,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742206_1382 (size=30955) 2024-11-23T13:22:38,528 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:38,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41224 deadline: 1732368218525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:38,531 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:38,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41254 deadline: 1732368218528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:38,534 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:38,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41186 deadline: 1732368218529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:38,534 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:38,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732368218530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:38,534 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:38,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41190 deadline: 1732368218532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:38,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-23T13:22:38,696 INFO [Thread-1675 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 102 completed 2024-11-23T13:22:38,697 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T13:22:38,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=104, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees 2024-11-23T13:22:38,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-23T13:22:38,699 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=104, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T13:22:38,700 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=104, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T13:22:38,700 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T13:22:38,721 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=55, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/5a68c595ceb04b4f8f5f219a3db4e846 2024-11-23T13:22:38,727 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/8db34368b35e442c8617f14c44c29cb6 is 50, key is test_row_0/B:col10/1732368155752/Put/seqid=0 2024-11-23T13:22:38,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742207_1383 (size=12001) 
2024-11-23T13:22:38,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-23T13:22:38,851 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:38,852 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-23T13:22:38,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:38,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. as already flushing 2024-11-23T13:22:38,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:38,852 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:38,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:38,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:39,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-23T13:22:39,004 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:39,005 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-23T13:22:39,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 
2024-11-23T13:22:39,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. as already flushing 2024-11-23T13:22:39,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:39,005 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:39,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:22:39,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:39,032 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:39,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41224 deadline: 1732368219030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:39,037 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:39,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732368219035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:39,037 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:39,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41186 deadline: 1732368219035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:39,039 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:39,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41254 deadline: 1732368219036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:39,042 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:39,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41190 deadline: 1732368219039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:39,133 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/8db34368b35e442c8617f14c44c29cb6 2024-11-23T13:22:39,139 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/eb79d56b597c4cd2b0883aae2d4b2efa is 50, key is test_row_0/C:col10/1732368155752/Put/seqid=0 2024-11-23T13:22:39,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742208_1384 (size=12001) 2024-11-23T13:22:39,157 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:39,158 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-23T13:22:39,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:39,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 
as already flushing 2024-11-23T13:22:39,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:39,158 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:39,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:39,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:39,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-23T13:22:39,310 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:39,310 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-23T13:22:39,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:39,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. as already flushing 2024-11-23T13:22:39,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:39,310 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:39,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:39,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:39,462 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:39,463 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-23T13:22:39,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:39,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. as already flushing 2024-11-23T13:22:39,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:39,463 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:39,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:39,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:39,551 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/eb79d56b597c4cd2b0883aae2d4b2efa 2024-11-23T13:22:39,555 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/5a68c595ceb04b4f8f5f219a3db4e846 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/5a68c595ceb04b4f8f5f219a3db4e846 2024-11-23T13:22:39,559 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/5a68c595ceb04b4f8f5f219a3db4e846, entries=150, sequenceid=55, filesize=30.2 K 2024-11-23T13:22:39,560 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/8db34368b35e442c8617f14c44c29cb6 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/8db34368b35e442c8617f14c44c29cb6 2024-11-23T13:22:39,563 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/8db34368b35e442c8617f14c44c29cb6, entries=150, sequenceid=55, 
filesize=11.7 K 2024-11-23T13:22:39,564 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/eb79d56b597c4cd2b0883aae2d4b2efa as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/eb79d56b597c4cd2b0883aae2d4b2efa 2024-11-23T13:22:39,568 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/eb79d56b597c4cd2b0883aae2d4b2efa, entries=150, sequenceid=55, filesize=11.7 K 2024-11-23T13:22:39,569 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 2d79dbed6dcd0da8c65ba796b67996f6 in 1689ms, sequenceid=55, compaction requested=true 2024-11-23T13:22:39,569 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2d79dbed6dcd0da8c65ba796b67996f6: 2024-11-23T13:22:39,569 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:22:39,569 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2d79dbed6dcd0da8c65ba796b67996f6:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T13:22:39,569 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:22:39,569 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2d79dbed6dcd0da8c65ba796b67996f6:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T13:22:39,569 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:22:39,569 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2d79dbed6dcd0da8c65ba796b67996f6:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T13:22:39,569 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:22:39,569 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:22:39,570 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92865 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:22:39,570 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 2d79dbed6dcd0da8c65ba796b67996f6/A is initiating minor compaction (all files) 2024-11-23T13:22:39,570 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2d79dbed6dcd0da8c65ba796b67996f6/A in 
TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:39,570 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:22:39,570 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/f0cca3de620446ff8a29962a05faf22e, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/4410be1b40c94d3eb8d9cfd96371a2b1, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/5a68c595ceb04b4f8f5f219a3db4e846] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp, totalSize=90.7 K 2024-11-23T13:22:39,570 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:39,570 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): 2d79dbed6dcd0da8c65ba796b67996f6/B is initiating minor compaction (all files) 2024-11-23T13:22:39,570 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. files: [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/f0cca3de620446ff8a29962a05faf22e, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/4410be1b40c94d3eb8d9cfd96371a2b1, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/5a68c595ceb04b4f8f5f219a3db4e846] 2024-11-23T13:22:39,570 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2d79dbed6dcd0da8c65ba796b67996f6/B in TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 
2024-11-23T13:22:39,570 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/545c12a9da4e43b48528a109bf514a4c, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/dadc375535324e7f888f41fabeafc4d5, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/8db34368b35e442c8617f14c44c29cb6] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp, totalSize=35.2 K 2024-11-23T13:22:39,571 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting f0cca3de620446ff8a29962a05faf22e, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732368154599 2024-11-23T13:22:39,571 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 545c12a9da4e43b48528a109bf514a4c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732368154599 2024-11-23T13:22:39,571 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 4410be1b40c94d3eb8d9cfd96371a2b1, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732368154622 2024-11-23T13:22:39,571 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting dadc375535324e7f888f41fabeafc4d5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732368154622 2024-11-23T13:22:39,571 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 5a68c595ceb04b4f8f5f219a3db4e846, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732368155752 2024-11-23T13:22:39,571 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8db34368b35e442c8617f14c44c29cb6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732368155752 2024-11-23T13:22:39,578 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=2d79dbed6dcd0da8c65ba796b67996f6] 2024-11-23T13:22:39,580 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2d79dbed6dcd0da8c65ba796b67996f6#B#compaction#330 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:22:39,580 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/cd1f5198a76f4c66ac5224d4561c11ae is 50, key is test_row_0/B:col10/1732368155752/Put/seqid=0 2024-11-23T13:22:39,581 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411231581e7619389470e916bf2affc63baa6_2d79dbed6dcd0da8c65ba796b67996f6 store=[table=TestAcidGuarantees family=A region=2d79dbed6dcd0da8c65ba796b67996f6] 2024-11-23T13:22:39,583 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411231581e7619389470e916bf2affc63baa6_2d79dbed6dcd0da8c65ba796b67996f6, store=[table=TestAcidGuarantees family=A region=2d79dbed6dcd0da8c65ba796b67996f6] 2024-11-23T13:22:39,583 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411231581e7619389470e916bf2affc63baa6_2d79dbed6dcd0da8c65ba796b67996f6 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=2d79dbed6dcd0da8c65ba796b67996f6] 2024-11-23T13:22:39,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742210_1386 (size=4469) 2024-11-23T13:22:39,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742209_1385 (size=12104) 2024-11-23T13:22:39,615 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:39,616 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-23T13:22:39,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 
2024-11-23T13:22:39,617 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2837): Flushing 2d79dbed6dcd0da8c65ba796b67996f6 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-23T13:22:39,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d79dbed6dcd0da8c65ba796b67996f6, store=A 2024-11-23T13:22:39,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:39,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d79dbed6dcd0da8c65ba796b67996f6, store=B 2024-11-23T13:22:39,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:39,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d79dbed6dcd0da8c65ba796b67996f6, store=C 2024-11-23T13:22:39,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:39,628 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/cd1f5198a76f4c66ac5224d4561c11ae as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/cd1f5198a76f4c66ac5224d4561c11ae 2024-11-23T13:22:39,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112354bd1a666b1f49ecb6c0a2e544242b2d_2d79dbed6dcd0da8c65ba796b67996f6 is 50, key is test_row_0/A:col10/1732368157913/Put/seqid=0 2024-11-23T13:22:39,639 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2d79dbed6dcd0da8c65ba796b67996f6/B of 2d79dbed6dcd0da8c65ba796b67996f6 into cd1f5198a76f4c66ac5224d4561c11ae(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T13:22:39,639 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2d79dbed6dcd0da8c65ba796b67996f6: 2024-11-23T13:22:39,639 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6., storeName=2d79dbed6dcd0da8c65ba796b67996f6/B, priority=13, startTime=1732368159569; duration=0sec 2024-11-23T13:22:39,640 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:22:39,640 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2d79dbed6dcd0da8c65ba796b67996f6:B 2024-11-23T13:22:39,640 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:22:39,641 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:22:39,642 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): 2d79dbed6dcd0da8c65ba796b67996f6/C is initiating minor compaction (all files) 2024-11-23T13:22:39,642 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2d79dbed6dcd0da8c65ba796b67996f6/C in TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:39,642 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/01cdf1b1d8b14eef87fc2d905754ea0f, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/5a8ad1fa81d741bb83e2c9e223a4b766, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/eb79d56b597c4cd2b0883aae2d4b2efa] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp, totalSize=35.2 K 2024-11-23T13:22:39,643 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 01cdf1b1d8b14eef87fc2d905754ea0f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732368154599 2024-11-23T13:22:39,643 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5a8ad1fa81d741bb83e2c9e223a4b766, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732368154622 2024-11-23T13:22:39,643 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting eb79d56b597c4cd2b0883aae2d4b2efa, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732368155752 2024-11-23T13:22:39,657 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 2d79dbed6dcd0da8c65ba796b67996f6#C#compaction#332 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:22:39,658 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/9e3d5f077a154eae956c27339fd3fafd is 50, key is test_row_0/C:col10/1732368155752/Put/seqid=0 2024-11-23T13:22:39,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742212_1388 (size=12104) 2024-11-23T13:22:39,675 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/9e3d5f077a154eae956c27339fd3fafd as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/9e3d5f077a154eae956c27339fd3fafd 2024-11-23T13:22:39,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742211_1387 (size=12154) 2024-11-23T13:22:39,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,688 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112354bd1a666b1f49ecb6c0a2e544242b2d_2d79dbed6dcd0da8c65ba796b67996f6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112354bd1a666b1f49ecb6c0a2e544242b2d_2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:39,688 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2d79dbed6dcd0da8c65ba796b67996f6/C of 2d79dbed6dcd0da8c65ba796b67996f6 into 9e3d5f077a154eae956c27339fd3fafd(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T13:22:39,689 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2d79dbed6dcd0da8c65ba796b67996f6: 2024-11-23T13:22:39,689 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6., storeName=2d79dbed6dcd0da8c65ba796b67996f6/C, priority=13, startTime=1732368159569; duration=0sec 2024-11-23T13:22:39,689 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:22:39,689 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2d79dbed6dcd0da8c65ba796b67996f6:C 2024-11-23T13:22:39,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/e7cdc8b9886a4027a21d37a47ec27b71, store: [table=TestAcidGuarantees family=A region=2d79dbed6dcd0da8c65ba796b67996f6] 2024-11-23T13:22:39,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/e7cdc8b9886a4027a21d37a47ec27b71 is 175, key is test_row_0/A:col10/1732368157913/Put/seqid=0 2024-11-23T13:22:39,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742213_1389 (size=30955) 2024-11-23T13:22:39,717 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=78, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/e7cdc8b9886a4027a21d37a47ec27b71 2024-11-23T13:22:39,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/1b069bcb576443c5b7024953bb6d0919 is 50, key is test_row_0/B:col10/1732368157913/Put/seqid=0 2024-11-23T13:22:39,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742214_1390 (size=12001) 2024-11-23T13:22:39,739 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/1b069bcb576443c5b7024953bb6d0919 2024-11-23T13:22:39,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the 
biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/7e4dc703f5154f5e9c2bb7986bc1cac5 is 50, key is test_row_0/C:col10/1732368157913/Put/seqid=0 2024-11-23T13:22:39,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742215_1391 (size=12001) 2024-11-23T13:22:39,772 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/7e4dc703f5154f5e9c2bb7986bc1cac5 2024-11-23T13:22:39,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/e7cdc8b9886a4027a21d37a47ec27b71 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/e7cdc8b9886a4027a21d37a47ec27b71 2024-11-23T13:22:39,787 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/e7cdc8b9886a4027a21d37a47ec27b71, entries=150, sequenceid=78, filesize=30.2 K 2024-11-23T13:22:39,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/1b069bcb576443c5b7024953bb6d0919 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/1b069bcb576443c5b7024953bb6d0919 2024-11-23T13:22:39,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
2024-11-23T13:22:39,796 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/1b069bcb576443c5b7024953bb6d0919, entries=150, sequenceid=78, filesize=11.7 K
2024-11-23T13:22:39,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/7e4dc703f5154f5e9c2bb7986bc1cac5 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/7e4dc703f5154f5e9c2bb7986bc1cac5
2024-11-23T13:22:39,801 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/7e4dc703f5154f5e9c2bb7986bc1cac5, entries=150, sequenceid=78, filesize=11.7 K
2024-11-23T13:22:39,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104
2024-11-23T13:22:39,802 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=0 B/0 for 2d79dbed6dcd0da8c65ba796b67996f6 in 185ms, sequenceid=78, compaction requested=false
2024-11-23T13:22:39,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2538): Flush status journal for 2d79dbed6dcd0da8c65ba796b67996f6:
2024-11-23T13:22:39,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.
2024-11-23T13:22:39,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=105
2024-11-23T13:22:39,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4106): Remote procedure done, pid=105
2024-11-23T13:22:39,805 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=105, resume processing ppid=104
2024-11-23T13:22:39,805 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, ppid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1040 sec
2024-11-23T13:22:39,807 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees in 1.1090 sec
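For context, the flush recorded above is driven through the master's procedure framework: pid=104 (FlushTableProcedure) fans out to pid=105 (FlushRegionProcedure), each column family's memstore is written to a .tmp HFile, committed into the store directory, and the result is reported back to the master. The sketch below shows one way such a table flush can be requested from a client through the public Admin API; it is a minimal illustration only, the connection setup and class name are assumptions, and it is not the code the TestAcidGuarantees harness actually runs.

// Illustrative sketch, not taken from the test harness: request a flush of the
// table seen in the log and let the master run the flush procedures for it.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        // Picks up hbase-site.xml from the classpath; cluster details are assumed here.
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            // Asks the cluster to flush every region of the table; in the log above this
            // shows up as a FlushTableProcedure with a FlushRegionProcedure per region.
            admin.flush(table);
        }
    }
}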
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... identical DEBUG entries, storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker, repeat across RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 (queue=0, port=33173) from 2024-11-23T13:22:39,895 through 2024-11-23T13:22:39,959 ...]
2024-11-23T13:22:39,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:39,992 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2d79dbed6dcd0da8c65ba796b67996f6#A#compaction#329 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
2024-11-23T13:22:39,992-13:22:40,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0..2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker (identical record repeated by handlers 0, 1 and 2, interleaved with the entries below)
2024-11-23T13:22:39,992 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/6ad5fc9711d5445a9a84608e88af064b is 175, key is test_row_0/A:col10/1732368155752/Put/seqid=0
2024-11-23T13:22:39,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742216_1392 (size=31058)
2024-11-23T13:22:40,002 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/6ad5fc9711d5445a9a84608e88af064b as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/6ad5fc9711d5445a9a84608e88af064b
2024-11-23T13:22:40,007 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2d79dbed6dcd0da8c65ba796b67996f6/A of 2d79dbed6dcd0da8c65ba796b67996f6 into 6ad5fc9711d5445a9a84608e88af064b(size=30.3 K), total size for store is 60.6 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-23T13:22:40,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,007 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2d79dbed6dcd0da8c65ba796b67996f6: 2024-11-23T13:22:40,007 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6., storeName=2d79dbed6dcd0da8c65ba796b67996f6/A, priority=13, startTime=1732368159569; duration=0sec 2024-11-23T13:22:40,007 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:22:40,007 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2d79dbed6dcd0da8c65ba796b67996f6:A 2024-11-23T13:22:40,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] 
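The records above cover one long-compaction cycle on store 2d79dbed6dcd0da8c65ba796b67996f6/A: three store files are rewritten into 6ad5fc9711d5445a9a84608e88af064b (30.3 K), the temporary file is committed from .tmp/A into A, and the compaction and split queues are reported empty. Around them, the RPC handler threads repeat the same "instantiating StoreFileTracker impl ... DefaultStoreFileTracker" DEBUG record many times per millisecond. A minimal sketch of a hypothetical helper for reviewing a log like this, collapsing those repeated tracker records and echoing compaction-related lines; the class and constants are illustrative only and are not part of the test output or of HBase:

import java.io.BufferedReader;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

/** Hypothetical helper: condenses a region-server log by counting the repeated
 *  StoreFileTracker instantiation records and printing compaction-related lines. */
public class CompactionLogSummary {

    // Repeated DEBUG record to collapse rather than print.
    private static final String TRACKER_MSG = "instantiating StoreFileTracker impl";
    // Case-insensitive keyword used to keep compaction lifecycle records.
    private static final String COMPACTION_MARKER = "compaction";

    public static void main(String[] args) throws IOException {
        if (args.length != 1) {
            System.err.println("usage: CompactionLogSummary <log-file>");
            return;
        }
        Path log = Path.of(args[0]);          // e.g. the captured test log
        long trackerRepeats = 0;
        try (BufferedReader in = Files.newBufferedReader(log)) {
            String line;
            while ((line = in.readLine()) != null) {
                if (line.contains(TRACKER_MSG)) {
                    trackerRepeats++;          // collapse instead of printing
                } else if (line.toLowerCase().contains(COMPACTION_MARKER)) {
                    System.out.println(line);  // keep compaction lifecycle records
                }
            }
        }
        System.out.println("StoreFileTracker instantiation records collapsed: " + trackerRepeats);
    }
}

Note the sketch works line by line, so physical lines that carry several log records (as in this capture) are kept or collapsed as a whole; that is adequate for a quick review but not a full log parser.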
2024-11-23T13:22:40,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:40,169 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2d79dbed6dcd0da8c65ba796b67996f6 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-23T13:22:40,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T13:22:40,169 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d79dbed6dcd0da8c65ba796b67996f6, store=A 2024-11-23T13:22:40,169 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:40,169 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d79dbed6dcd0da8c65ba796b67996f6, store=B 2024-11-23T13:22:40,169 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:40,169 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d79dbed6dcd0da8c65ba796b67996f6, store=C 2024-11-23T13:22:40,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,169 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:40,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,179 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411233d6ff8412afd4e6a93588838eeb1b5f5_2d79dbed6dcd0da8c65ba796b67996f6 is 50, key is test_row_0/A:col10/1732368160165/Put/seqid=0 2024-11-23T13:22:40,180 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,187 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,192 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742218_1394 (size=24358) 2024-11-23T13:22:40,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,235 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:40,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41186 deadline: 1732368220225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:40,237 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:40,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732368220228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:40,238 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:40,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41254 deadline: 1732368220229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:40,244 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:40,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41224 deadline: 1732368220235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:40,244 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:40,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41190 deadline: 1732368220235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:40,341 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:40,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41186 deadline: 1732368220336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:40,345 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:40,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732368220338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:40,345 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:40,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41254 deadline: 1732368220339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:40,349 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:40,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41224 deadline: 1732368220345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:40,349 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:40,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41190 deadline: 1732368220346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:40,546 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:40,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41186 deadline: 1732368220543, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:40,550 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:40,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732368220546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:40,551 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:40,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41254 deadline: 1732368220547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:40,552 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:40,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41190 deadline: 1732368220551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:40,553 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:40,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41224 deadline: 1732368220552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:40,597 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:40,600 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411233d6ff8412afd4e6a93588838eeb1b5f5_2d79dbed6dcd0da8c65ba796b67996f6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411233d6ff8412afd4e6a93588838eeb1b5f5_2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:40,601 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/7d77399252234e8daa60185c03a773e5, store: [table=TestAcidGuarantees family=A region=2d79dbed6dcd0da8c65ba796b67996f6] 2024-11-23T13:22:40,602 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/7d77399252234e8daa60185c03a773e5 is 175, key is test_row_0/A:col10/1732368160165/Put/seqid=0 2024-11-23T13:22:40,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742217_1393 (size=73995) 2024-11-23T13:22:40,605 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=92, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/7d77399252234e8daa60185c03a773e5 2024-11-23T13:22:40,613 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/ecc6751428b54d979bb790b6166b05b4 is 50, key is test_row_0/B:col10/1732368160165/Put/seqid=0 2024-11-23T13:22:40,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742219_1395 (size=12001) 2024-11-23T13:22:40,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-23T13:22:40,803 INFO [Thread-1675 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 104 completed 2024-11-23T13:22:40,804 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T13:22:40,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees 2024-11-23T13:22:40,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-23T13:22:40,806 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T13:22:40,806 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T13:22:40,806 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=107, ppid=106, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T13:22:40,852 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:40,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41186 deadline: 1732368220848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:40,857 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:40,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41254 deadline: 1732368220853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:40,857 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:40,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732368220853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:40,858 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:40,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41190 deadline: 1732368220853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:40,858 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:40,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41224 deadline: 1732368220854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:40,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-23T13:22:40,958 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:40,958 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-23T13:22:40,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:40,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. as already flushing 2024-11-23T13:22:40,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:40,959 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:40,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:40,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:41,022 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/ecc6751428b54d979bb790b6166b05b4 2024-11-23T13:22:41,029 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/b2dd7e51ed034f54aa82d5a7fbb3a728 is 50, key is test_row_0/C:col10/1732368160165/Put/seqid=0 2024-11-23T13:22:41,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742220_1396 (size=12001) 2024-11-23T13:22:41,075 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-23T13:22:41,075 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-11-23T13:22:41,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-23T13:22:41,111 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:41,111 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-23T13:22:41,111 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:41,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. as already flushing 2024-11-23T13:22:41,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:41,112 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:41,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:22:41,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:41,264 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:41,264 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-23T13:22:41,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:41,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 
as already flushing 2024-11-23T13:22:41,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:41,264 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:41,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:41,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:41,354 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:41,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41186 deadline: 1732368221353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:41,361 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:41,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732368221358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:41,362 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:41,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41254 deadline: 1732368221359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:41,362 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:41,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41224 deadline: 1732368221359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:41,363 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:41,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41190 deadline: 1732368221361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:41,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-23T13:22:41,417 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:41,417 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-23T13:22:41,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:41,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. as already flushing 2024-11-23T13:22:41,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:41,417 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:41,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:41,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:41,436 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/b2dd7e51ed034f54aa82d5a7fbb3a728 2024-11-23T13:22:41,440 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/7d77399252234e8daa60185c03a773e5 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/7d77399252234e8daa60185c03a773e5 2024-11-23T13:22:41,444 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/7d77399252234e8daa60185c03a773e5, entries=400, sequenceid=92, filesize=72.3 K 2024-11-23T13:22:41,444 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/ecc6751428b54d979bb790b6166b05b4 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/ecc6751428b54d979bb790b6166b05b4 2024-11-23T13:22:41,448 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/ecc6751428b54d979bb790b6166b05b4, entries=150, sequenceid=92, 
filesize=11.7 K 2024-11-23T13:22:41,449 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/b2dd7e51ed034f54aa82d5a7fbb3a728 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/b2dd7e51ed034f54aa82d5a7fbb3a728 2024-11-23T13:22:41,452 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/b2dd7e51ed034f54aa82d5a7fbb3a728, entries=150, sequenceid=92, filesize=11.7 K 2024-11-23T13:22:41,452 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 2d79dbed6dcd0da8c65ba796b67996f6 in 1283ms, sequenceid=92, compaction requested=true 2024-11-23T13:22:41,452 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2d79dbed6dcd0da8c65ba796b67996f6: 2024-11-23T13:22:41,453 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2d79dbed6dcd0da8c65ba796b67996f6:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T13:22:41,453 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:22:41,453 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2d79dbed6dcd0da8c65ba796b67996f6:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T13:22:41,453 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:22:41,453 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2d79dbed6dcd0da8c65ba796b67996f6:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T13:22:41,453 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:22:41,453 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:22:41,453 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:22:41,453 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 136008 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:22:41,453 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:22:41,453 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): 
2d79dbed6dcd0da8c65ba796b67996f6/A is initiating minor compaction (all files) 2024-11-23T13:22:41,453 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 2d79dbed6dcd0da8c65ba796b67996f6/B is initiating minor compaction (all files) 2024-11-23T13:22:41,453 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2d79dbed6dcd0da8c65ba796b67996f6/B in TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:41,453 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2d79dbed6dcd0da8c65ba796b67996f6/A in TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:41,454 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/cd1f5198a76f4c66ac5224d4561c11ae, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/1b069bcb576443c5b7024953bb6d0919, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/ecc6751428b54d979bb790b6166b05b4] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp, totalSize=35.3 K 2024-11-23T13:22:41,454 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/6ad5fc9711d5445a9a84608e88af064b, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/e7cdc8b9886a4027a21d37a47ec27b71, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/7d77399252234e8daa60185c03a773e5] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp, totalSize=132.8 K 2024-11-23T13:22:41,454 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:41,454 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 
files: [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/6ad5fc9711d5445a9a84608e88af064b, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/e7cdc8b9886a4027a21d37a47ec27b71, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/7d77399252234e8daa60185c03a773e5] 2024-11-23T13:22:41,455 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting cd1f5198a76f4c66ac5224d4561c11ae, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732368155752 2024-11-23T13:22:41,455 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6ad5fc9711d5445a9a84608e88af064b, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732368155752 2024-11-23T13:22:41,455 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 1b069bcb576443c5b7024953bb6d0919, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732368157902 2024-11-23T13:22:41,456 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting e7cdc8b9886a4027a21d37a47ec27b71, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732368157902 2024-11-23T13:22:41,456 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting ecc6751428b54d979bb790b6166b05b4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732368160153 2024-11-23T13:22:41,456 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7d77399252234e8daa60185c03a773e5, keycount=400, bloomtype=ROW, size=72.3 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732368160119 2024-11-23T13:22:41,462 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=2d79dbed6dcd0da8c65ba796b67996f6] 2024-11-23T13:22:41,463 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2d79dbed6dcd0da8c65ba796b67996f6#B#compaction#338 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:22:41,464 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112311858f1e1e7946398c38b4d4b73041f5_2d79dbed6dcd0da8c65ba796b67996f6 store=[table=TestAcidGuarantees family=A region=2d79dbed6dcd0da8c65ba796b67996f6] 2024-11-23T13:22:41,464 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/63f1509c5b6a477bafd83ff1deff2513 is 50, key is test_row_0/B:col10/1732368160165/Put/seqid=0 2024-11-23T13:22:41,467 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112311858f1e1e7946398c38b4d4b73041f5_2d79dbed6dcd0da8c65ba796b67996f6, store=[table=TestAcidGuarantees family=A region=2d79dbed6dcd0da8c65ba796b67996f6] 2024-11-23T13:22:41,467 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112311858f1e1e7946398c38b4d4b73041f5_2d79dbed6dcd0da8c65ba796b67996f6 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=2d79dbed6dcd0da8c65ba796b67996f6] 2024-11-23T13:22:41,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742221_1397 (size=12207) 2024-11-23T13:22:41,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742222_1398 (size=4469) 2024-11-23T13:22:41,480 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2d79dbed6dcd0da8c65ba796b67996f6#A#compaction#339 average throughput is 1.36 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:22:41,480 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/3571bcc388d947dcbf9970a46284894a is 175, key is test_row_0/A:col10/1732368160165/Put/seqid=0 2024-11-23T13:22:41,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742223_1399 (size=31161) 2024-11-23T13:22:41,570 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:41,570 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-23T13:22:41,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 
2024-11-23T13:22:41,571 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2837): Flushing 2d79dbed6dcd0da8c65ba796b67996f6 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-23T13:22:41,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d79dbed6dcd0da8c65ba796b67996f6, store=A 2024-11-23T13:22:41,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:41,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d79dbed6dcd0da8c65ba796b67996f6, store=B 2024-11-23T13:22:41,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:41,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d79dbed6dcd0da8c65ba796b67996f6, store=C 2024-11-23T13:22:41,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:41,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411236b96aab086ad40688430ecab87b6969c_2d79dbed6dcd0da8c65ba796b67996f6 is 50, key is test_row_0/A:col10/1732368160234/Put/seqid=0 2024-11-23T13:22:41,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742224_1400 (size=12154) 2024-11-23T13:22:41,877 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/63f1509c5b6a477bafd83ff1deff2513 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/63f1509c5b6a477bafd83ff1deff2513 2024-11-23T13:22:41,882 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2d79dbed6dcd0da8c65ba796b67996f6/B of 2d79dbed6dcd0da8c65ba796b67996f6 into 63f1509c5b6a477bafd83ff1deff2513(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T13:22:41,882 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2d79dbed6dcd0da8c65ba796b67996f6: 2024-11-23T13:22:41,882 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6., storeName=2d79dbed6dcd0da8c65ba796b67996f6/B, priority=13, startTime=1732368161453; duration=0sec 2024-11-23T13:22:41,882 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:22:41,882 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2d79dbed6dcd0da8c65ba796b67996f6:B 2024-11-23T13:22:41,882 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:22:41,883 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:22:41,883 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 2d79dbed6dcd0da8c65ba796b67996f6/C is initiating minor compaction (all files) 2024-11-23T13:22:41,883 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2d79dbed6dcd0da8c65ba796b67996f6/C in TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:41,883 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/9e3d5f077a154eae956c27339fd3fafd, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/7e4dc703f5154f5e9c2bb7986bc1cac5, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/b2dd7e51ed034f54aa82d5a7fbb3a728] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp, totalSize=35.3 K 2024-11-23T13:22:41,883 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 9e3d5f077a154eae956c27339fd3fafd, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732368155752 2024-11-23T13:22:41,884 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 7e4dc703f5154f5e9c2bb7986bc1cac5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732368157902 2024-11-23T13:22:41,884 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting b2dd7e51ed034f54aa82d5a7fbb3a728, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732368160153 2024-11-23T13:22:41,889 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/3571bcc388d947dcbf9970a46284894a as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/3571bcc388d947dcbf9970a46284894a 2024-11-23T13:22:41,892 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2d79dbed6dcd0da8c65ba796b67996f6#C#compaction#341 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:22:41,892 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/400b139fc21c43aa84333d452e7fe474 is 50, key is test_row_0/C:col10/1732368160165/Put/seqid=0 2024-11-23T13:22:41,893 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2d79dbed6dcd0da8c65ba796b67996f6/A of 2d79dbed6dcd0da8c65ba796b67996f6 into 3571bcc388d947dcbf9970a46284894a(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:22:41,893 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2d79dbed6dcd0da8c65ba796b67996f6: 2024-11-23T13:22:41,894 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6., storeName=2d79dbed6dcd0da8c65ba796b67996f6/A, priority=13, startTime=1732368161452; duration=0sec 2024-11-23T13:22:41,894 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:22:41,894 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2d79dbed6dcd0da8c65ba796b67996f6:A 2024-11-23T13:22:41,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742225_1401 (size=12207) 2024-11-23T13:22:41,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-23T13:22:41,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:41,985 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411236b96aab086ad40688430ecab87b6969c_2d79dbed6dcd0da8c65ba796b67996f6 to 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411236b96aab086ad40688430ecab87b6969c_2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:41,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/ffa8a9fe87f04390939e2a03f1e6fc32, store: [table=TestAcidGuarantees family=A region=2d79dbed6dcd0da8c65ba796b67996f6] 2024-11-23T13:22:41,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/ffa8a9fe87f04390939e2a03f1e6fc32 is 175, key is test_row_0/A:col10/1732368160234/Put/seqid=0 2024-11-23T13:22:41,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742226_1402 (size=30955) 2024-11-23T13:22:42,301 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/400b139fc21c43aa84333d452e7fe474 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/400b139fc21c43aa84333d452e7fe474 2024-11-23T13:22:42,305 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2d79dbed6dcd0da8c65ba796b67996f6/C of 2d79dbed6dcd0da8c65ba796b67996f6 into 400b139fc21c43aa84333d452e7fe474(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:22:42,305 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2d79dbed6dcd0da8c65ba796b67996f6: 2024-11-23T13:22:42,305 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6., storeName=2d79dbed6dcd0da8c65ba796b67996f6/C, priority=13, startTime=1732368161453; duration=0sec 2024-11-23T13:22:42,306 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:22:42,306 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2d79dbed6dcd0da8c65ba796b67996f6:C 2024-11-23T13:22:42,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:42,365 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 
as already flushing 2024-11-23T13:22:42,376 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:42,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41254 deadline: 1732368222371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:42,377 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:42,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41186 deadline: 1732368222372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:42,381 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:42,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732368222376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:42,381 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:42,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41190 deadline: 1732368222377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:42,382 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:42,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41224 deadline: 1732368222378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:42,391 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=118, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/ffa8a9fe87f04390939e2a03f1e6fc32 2024-11-23T13:22:42,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/aca1994bcc474cef8cb7adbfa289ed06 is 50, key is test_row_0/B:col10/1732368160234/Put/seqid=0 2024-11-23T13:22:42,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742227_1403 (size=12001) 2024-11-23T13:22:42,480 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:42,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41254 deadline: 1732368222477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:42,480 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:42,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41186 deadline: 1732368222478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:42,485 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:42,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732368222482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:42,485 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:42,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41190 deadline: 1732368222482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:42,485 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:42,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41224 deadline: 1732368222483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:42,681 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:42,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41186 deadline: 1732368222681, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:42,685 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:42,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41254 deadline: 1732368222682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:42,689 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:42,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732368222686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:42,689 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:42,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41190 deadline: 1732368222687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:42,690 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:42,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41224 deadline: 1732368222687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:42,806 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/aca1994bcc474cef8cb7adbfa289ed06 2024-11-23T13:22:42,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/8abab0b83a4149cfae824dfa33e29971 is 50, key is test_row_0/C:col10/1732368160234/Put/seqid=0 2024-11-23T13:22:42,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742228_1404 (size=12001) 2024-11-23T13:22:42,820 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/8abab0b83a4149cfae824dfa33e29971 2024-11-23T13:22:42,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/ffa8a9fe87f04390939e2a03f1e6fc32 as 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/ffa8a9fe87f04390939e2a03f1e6fc32 2024-11-23T13:22:42,829 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/ffa8a9fe87f04390939e2a03f1e6fc32, entries=150, sequenceid=118, filesize=30.2 K 2024-11-23T13:22:42,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/aca1994bcc474cef8cb7adbfa289ed06 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/aca1994bcc474cef8cb7adbfa289ed06 2024-11-23T13:22:42,834 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/aca1994bcc474cef8cb7adbfa289ed06, entries=150, sequenceid=118, filesize=11.7 K 2024-11-23T13:22:42,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/8abab0b83a4149cfae824dfa33e29971 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/8abab0b83a4149cfae824dfa33e29971 2024-11-23T13:22:42,838 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/8abab0b83a4149cfae824dfa33e29971, entries=150, sequenceid=118, filesize=11.7 K 2024-11-23T13:22:42,838 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for 2d79dbed6dcd0da8c65ba796b67996f6 in 1268ms, sequenceid=118, compaction requested=false 2024-11-23T13:22:42,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2538): Flush status journal for 2d79dbed6dcd0da8c65ba796b67996f6: 2024-11-23T13:22:42,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 
2024-11-23T13:22:42,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=107 2024-11-23T13:22:42,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4106): Remote procedure done, pid=107 2024-11-23T13:22:42,841 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=107, resume processing ppid=106 2024-11-23T13:22:42,841 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, ppid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0340 sec 2024-11-23T13:22:42,842 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees in 2.0370 sec 2024-11-23T13:22:42,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-23T13:22:42,909 INFO [Thread-1675 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 106 completed 2024-11-23T13:22:42,910 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T13:22:42,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees 2024-11-23T13:22:42,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-23T13:22:42,912 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T13:22:42,912 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T13:22:42,913 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T13:22:42,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:42,989 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2d79dbed6dcd0da8c65ba796b67996f6 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-23T13:22:42,989 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d79dbed6dcd0da8c65ba796b67996f6, store=A 2024-11-23T13:22:42,989 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:42,989 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d79dbed6dcd0da8c65ba796b67996f6, store=B 2024-11-23T13:22:42,989 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new 
segment=null 2024-11-23T13:22:42,989 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d79dbed6dcd0da8c65ba796b67996f6, store=C 2024-11-23T13:22:42,989 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:42,999 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411230161f5ccdf754816aeb4ca2f530a9080_2d79dbed6dcd0da8c65ba796b67996f6 is 50, key is test_row_0/A:col10/1732368162371/Put/seqid=0 2024-11-23T13:22:43,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742229_1405 (size=14744) 2024-11-23T13:22:43,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-23T13:22:43,023 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:43,023 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:43,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41190 deadline: 1732368223018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:43,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41254 deadline: 1732368223018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:43,024 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:43,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41224 deadline: 1732368223019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:43,027 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:43,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732368223023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:43,032 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:43,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41186 deadline: 1732368223024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:43,064 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:43,065 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-23T13:22:43,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:43,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. as already flushing 2024-11-23T13:22:43,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:43,065 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:22:43,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:43,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:43,127 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:43,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41254 deadline: 1732368223125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:43,128 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:43,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41190 deadline: 1732368223125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:43,128 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:43,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41224 deadline: 1732368223125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:43,133 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:43,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732368223129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:43,134 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:43,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41186 deadline: 1732368223133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:43,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-23T13:22:43,217 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:43,217 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-23T13:22:43,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:43,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. as already flushing 2024-11-23T13:22:43,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:43,218 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:22:43,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:43,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:43,331 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:43,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41190 deadline: 1732368223329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:43,333 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:43,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41254 deadline: 1732368223330, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:43,334 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:43,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41224 deadline: 1732368223331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:43,339 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:43,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732368223334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:43,342 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:43,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41186 deadline: 1732368223336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:43,370 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:43,371 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-23T13:22:43,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:43,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. as already flushing 2024-11-23T13:22:43,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:43,371 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:22:43,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:43,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:43,404 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:43,407 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411230161f5ccdf754816aeb4ca2f530a9080_2d79dbed6dcd0da8c65ba796b67996f6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411230161f5ccdf754816aeb4ca2f530a9080_2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:43,408 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/75c0769952d3426995c86b7f95f725b8, store: [table=TestAcidGuarantees family=A region=2d79dbed6dcd0da8c65ba796b67996f6] 2024-11-23T13:22:43,409 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/75c0769952d3426995c86b7f95f725b8 is 175, key is test_row_0/A:col10/1732368162371/Put/seqid=0 2024-11-23T13:22:43,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742230_1406 (size=39699) 2024-11-23T13:22:43,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-23T13:22:43,523 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:43,523 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-23T13:22:43,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:43,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. as already flushing 2024-11-23T13:22:43,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 
2024-11-23T13:22:43,524 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:43,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:43,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:43,640 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:43,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41190 deadline: 1732368223634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:43,641 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:43,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41254 deadline: 1732368223635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:43,642 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:43,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41224 deadline: 1732368223637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:43,646 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:43,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732368223641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:43,647 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:43,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41186 deadline: 1732368223644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:43,676 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:43,676 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-23T13:22:43,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:43,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. as already flushing 2024-11-23T13:22:43,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:43,677 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:43,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:43,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:43,814 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=133, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/75c0769952d3426995c86b7f95f725b8 2024-11-23T13:22:43,822 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/424db21ff0a74a28ac89d322558fd3cf is 50, key is test_row_0/B:col10/1732368162371/Put/seqid=0 2024-11-23T13:22:43,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742231_1407 (size=12101) 2024-11-23T13:22:43,829 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:43,829 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-23T13:22:43,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:43,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 
as already flushing 2024-11-23T13:22:43,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:43,829 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:43,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:43,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:43,981 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:43,982 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-23T13:22:43,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:43,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. as already flushing 2024-11-23T13:22:43,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:43,982 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:43,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:43,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:44,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-23T13:22:44,134 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:44,135 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-23T13:22:44,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:44,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. as already flushing 2024-11-23T13:22:44,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:44,135 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:44,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:44,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:44,147 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:44,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41190 deadline: 1732368224143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:44,148 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:44,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41254 deadline: 1732368224144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:44,148 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:44,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41224 deadline: 1732368224146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:44,153 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:44,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732368224149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:44,153 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:44,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41186 deadline: 1732368224149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:44,228 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/424db21ff0a74a28ac89d322558fd3cf 2024-11-23T13:22:44,235 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/318e8b2bf27743c6afe593996b6ebc3b is 50, key is test_row_0/C:col10/1732368162371/Put/seqid=0 2024-11-23T13:22:44,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742232_1408 (size=12101) 2024-11-23T13:22:44,287 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:44,287 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-23T13:22:44,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:44,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 
as already flushing 2024-11-23T13:22:44,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:44,287 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:44,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:44,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:44,439 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:44,440 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-23T13:22:44,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:44,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. as already flushing 2024-11-23T13:22:44,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:44,440 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:44,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:44,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:44,592 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:44,593 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-23T13:22:44,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:44,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. as already flushing 2024-11-23T13:22:44,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:44,593 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:44,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:44,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:44,639 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/318e8b2bf27743c6afe593996b6ebc3b 2024-11-23T13:22:44,643 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/75c0769952d3426995c86b7f95f725b8 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/75c0769952d3426995c86b7f95f725b8 2024-11-23T13:22:44,647 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/75c0769952d3426995c86b7f95f725b8, entries=200, sequenceid=133, filesize=38.8 K 2024-11-23T13:22:44,647 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/424db21ff0a74a28ac89d322558fd3cf as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/424db21ff0a74a28ac89d322558fd3cf 2024-11-23T13:22:44,651 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/424db21ff0a74a28ac89d322558fd3cf, entries=150, 
sequenceid=133, filesize=11.8 K 2024-11-23T13:22:44,652 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/318e8b2bf27743c6afe593996b6ebc3b as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/318e8b2bf27743c6afe593996b6ebc3b 2024-11-23T13:22:44,654 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/318e8b2bf27743c6afe593996b6ebc3b, entries=150, sequenceid=133, filesize=11.8 K 2024-11-23T13:22:44,655 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 2d79dbed6dcd0da8c65ba796b67996f6 in 1667ms, sequenceid=133, compaction requested=true 2024-11-23T13:22:44,655 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2d79dbed6dcd0da8c65ba796b67996f6: 2024-11-23T13:22:44,655 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2d79dbed6dcd0da8c65ba796b67996f6:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T13:22:44,655 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:22:44,655 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2d79dbed6dcd0da8c65ba796b67996f6:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T13:22:44,656 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:22:44,656 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:22:44,656 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2d79dbed6dcd0da8c65ba796b67996f6:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T13:22:44,656 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:22:44,656 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:22:44,657 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:22:44,657 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101815 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:22:44,657 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] 
regionserver.HStore(1540): 2d79dbed6dcd0da8c65ba796b67996f6/B is initiating minor compaction (all files) 2024-11-23T13:22:44,657 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): 2d79dbed6dcd0da8c65ba796b67996f6/A is initiating minor compaction (all files) 2024-11-23T13:22:44,657 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2d79dbed6dcd0da8c65ba796b67996f6/B in TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:44,657 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2d79dbed6dcd0da8c65ba796b67996f6/A in TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:44,657 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/63f1509c5b6a477bafd83ff1deff2513, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/aca1994bcc474cef8cb7adbfa289ed06, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/424db21ff0a74a28ac89d322558fd3cf] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp, totalSize=35.5 K 2024-11-23T13:22:44,657 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/3571bcc388d947dcbf9970a46284894a, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/ffa8a9fe87f04390939e2a03f1e6fc32, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/75c0769952d3426995c86b7f95f725b8] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp, totalSize=99.4 K 2024-11-23T13:22:44,657 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:44,657 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 
files: [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/3571bcc388d947dcbf9970a46284894a, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/ffa8a9fe87f04390939e2a03f1e6fc32, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/75c0769952d3426995c86b7f95f725b8] 2024-11-23T13:22:44,657 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 63f1509c5b6a477bafd83ff1deff2513, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732368160153 2024-11-23T13:22:44,657 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3571bcc388d947dcbf9970a46284894a, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732368160153 2024-11-23T13:22:44,657 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting aca1994bcc474cef8cb7adbfa289ed06, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732368160227 2024-11-23T13:22:44,657 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting ffa8a9fe87f04390939e2a03f1e6fc32, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732368160227 2024-11-23T13:22:44,658 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 424db21ff0a74a28ac89d322558fd3cf, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732368162371 2024-11-23T13:22:44,658 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 75c0769952d3426995c86b7f95f725b8, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732368162371 2024-11-23T13:22:44,670 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=2d79dbed6dcd0da8c65ba796b67996f6] 2024-11-23T13:22:44,671 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2d79dbed6dcd0da8c65ba796b67996f6#B#compaction#347 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:22:44,671 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/4e7ea3abf7814676816a9366ac202dcd is 50, key is test_row_0/B:col10/1732368162371/Put/seqid=0 2024-11-23T13:22:44,672 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411237004631330e3426c9c51437c0b09a34e_2d79dbed6dcd0da8c65ba796b67996f6 store=[table=TestAcidGuarantees family=A region=2d79dbed6dcd0da8c65ba796b67996f6] 2024-11-23T13:22:44,673 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411237004631330e3426c9c51437c0b09a34e_2d79dbed6dcd0da8c65ba796b67996f6, store=[table=TestAcidGuarantees family=A region=2d79dbed6dcd0da8c65ba796b67996f6] 2024-11-23T13:22:44,674 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411237004631330e3426c9c51437c0b09a34e_2d79dbed6dcd0da8c65ba796b67996f6 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=2d79dbed6dcd0da8c65ba796b67996f6] 2024-11-23T13:22:44,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742233_1409 (size=12409) 2024-11-23T13:22:44,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742234_1410 (size=4469) 2024-11-23T13:22:44,745 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:44,746 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-23T13:22:44,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 
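For context on the repeated FlushRegionCallable dispatches for pid=109 above: the master keeps re-dispatching the region flush while HRegion reports "already flushing", and the attempt that starts just above is the one that finally proceeds (the "Flushing ... 3/3 column families" entry that follows). A minimal sketch of how such a table flush is requested from a client using the standard HBase 2.x Admin API; the table name matches the test table in this log, while everything else (class name, configuration source) is illustrative rather than part of the test harness:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestSketch {
  public static void main(String[] args) throws Exception {
    // Picks up whatever hbase-site.xml is on the classpath.
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Asks the master to flush every region of the table. Server-side the
      // per-region work is dispatched to the region servers, comparable to the
      // pid=109 FlushRegionCallable entries recorded above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```

Whether an individual region flushes immediately or reports "already flushing" and is retried, as seen throughout this log, is decided on the region server.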
2024-11-23T13:22:44,746 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2837): Flushing 2d79dbed6dcd0da8c65ba796b67996f6 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-23T13:22:44,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d79dbed6dcd0da8c65ba796b67996f6, store=A 2024-11-23T13:22:44,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:44,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d79dbed6dcd0da8c65ba796b67996f6, store=B 2024-11-23T13:22:44,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:44,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d79dbed6dcd0da8c65ba796b67996f6, store=C 2024-11-23T13:22:44,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:44,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123a4708b29971c4826a382b5beb8a65d3d_2d79dbed6dcd0da8c65ba796b67996f6 is 50, key is test_row_0/A:col10/1732368163017/Put/seqid=0 2024-11-23T13:22:44,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742235_1411 (size=12304) 2024-11-23T13:22:44,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:44,763 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123a4708b29971c4826a382b5beb8a65d3d_2d79dbed6dcd0da8c65ba796b67996f6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123a4708b29971c4826a382b5beb8a65d3d_2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:44,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/3f3c0a3b9eb745528cd7c02e013812d7, store: [table=TestAcidGuarantees family=A region=2d79dbed6dcd0da8c65ba796b67996f6] 2024-11-23T13:22:44,765 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/3f3c0a3b9eb745528cd7c02e013812d7 is 175, key is test_row_0/A:col10/1732368163017/Put/seqid=0 2024-11-23T13:22:44,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742236_1412 (size=31105) 2024-11-23T13:22:45,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-23T13:22:45,086 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2d79dbed6dcd0da8c65ba796b67996f6#A#compaction#348 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:22:45,087 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/895527800f0145ac9f4ef1acf2dfb2b9 is 175, key is test_row_0/A:col10/1732368162371/Put/seqid=0 2024-11-23T13:22:45,088 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/4e7ea3abf7814676816a9366ac202dcd as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/4e7ea3abf7814676816a9366ac202dcd 2024-11-23T13:22:45,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742237_1413 (size=31363) 2024-11-23T13:22:45,093 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2d79dbed6dcd0da8c65ba796b67996f6/B of 2d79dbed6dcd0da8c65ba796b67996f6 into 4e7ea3abf7814676816a9366ac202dcd(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
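The "Over memstore limit=512.0 K" warnings earlier in this log (and again just below) show HRegion.checkResources rejecting writes with RegionTooBusyException while the region's memstore sits above its blocking limit, until flushes and the compactions completed above free up space. A hedged client-side sketch of backing off on that exception; it assumes client retries are configured low enough for RegionTooBusyException to surface to the caller (with default settings the HBase client retries such calls internally and would instead throw a retries-exhausted error), and the row/column values are illustrative only:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      int attempts = 0;
      while (true) {
        try {
          table.put(put); // Rejected while the memstore is over its blocking limit.
          break;
        } catch (RegionTooBusyException e) {
          // Same condition as the "Region is too busy" warnings in this log:
          // back off and give flushes/compactions time to drain the memstore.
          if (++attempts >= 10) {
            throw e;
          }
          Thread.sleep(100L * attempts);
        }
      }
    }
  }
}
```

The 512 K figure in the warnings is the region's blocking memstore size (flush size times the block multiplier), presumably kept small by the test so that writers hit the limit and exercise the flush path.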
2024-11-23T13:22:45,093 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2d79dbed6dcd0da8c65ba796b67996f6: 2024-11-23T13:22:45,093 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6., storeName=2d79dbed6dcd0da8c65ba796b67996f6/B, priority=13, startTime=1732368164655; duration=0sec 2024-11-23T13:22:45,093 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:22:45,093 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2d79dbed6dcd0da8c65ba796b67996f6:B 2024-11-23T13:22:45,093 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:22:45,095 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:22:45,095 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 2d79dbed6dcd0da8c65ba796b67996f6/C is initiating minor compaction (all files) 2024-11-23T13:22:45,095 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2d79dbed6dcd0da8c65ba796b67996f6/C in TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:45,095 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/400b139fc21c43aa84333d452e7fe474, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/8abab0b83a4149cfae824dfa33e29971, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/318e8b2bf27743c6afe593996b6ebc3b] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp, totalSize=35.5 K 2024-11-23T13:22:45,096 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 400b139fc21c43aa84333d452e7fe474, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1732368160153 2024-11-23T13:22:45,097 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 8abab0b83a4149cfae824dfa33e29971, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1732368160227 2024-11-23T13:22:45,098 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 318e8b2bf27743c6afe593996b6ebc3b, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732368162371 2024-11-23T13:22:45,099 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/895527800f0145ac9f4ef1acf2dfb2b9 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/895527800f0145ac9f4ef1acf2dfb2b9 2024-11-23T13:22:45,103 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2d79dbed6dcd0da8c65ba796b67996f6/A of 2d79dbed6dcd0da8c65ba796b67996f6 into 895527800f0145ac9f4ef1acf2dfb2b9(size=30.6 K), total size for store is 30.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:22:45,103 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2d79dbed6dcd0da8c65ba796b67996f6: 2024-11-23T13:22:45,103 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6., storeName=2d79dbed6dcd0da8c65ba796b67996f6/A, priority=13, startTime=1732368164655; duration=0sec 2024-11-23T13:22:45,103 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:22:45,103 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2d79dbed6dcd0da8c65ba796b67996f6:A 2024-11-23T13:22:45,106 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2d79dbed6dcd0da8c65ba796b67996f6#C#compaction#350 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:22:45,107 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/b2adff6bcb4946298d55f509d62eeaae is 50, key is test_row_0/C:col10/1732368162371/Put/seqid=0 2024-11-23T13:22:45,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742238_1414 (size=12409) 2024-11-23T13:22:45,153 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. as already flushing 2024-11-23T13:22:45,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:45,166 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:45,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732368225161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:45,167 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:45,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41254 deadline: 1732368225161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:45,168 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=158, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/3f3c0a3b9eb745528cd7c02e013812d7 2024-11-23T13:22:45,168 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:45,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41186 deadline: 1732368225166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:45,171 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:45,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41190 deadline: 1732368225167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:45,171 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:45,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41224 deadline: 1732368225167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:45,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/0ec595a152824f328145fc4a88ad20dc is 50, key is test_row_0/B:col10/1732368163017/Put/seqid=0 2024-11-23T13:22:45,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742239_1415 (size=12151) 2024-11-23T13:22:45,270 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:45,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732368225268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:45,271 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:45,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41254 deadline: 1732368225268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:45,271 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:45,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41186 deadline: 1732368225269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:45,275 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:45,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41190 deadline: 1732368225272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:45,275 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:45,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41224 deadline: 1732368225272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:45,474 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:45,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41254 deadline: 1732368225472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:45,474 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:45,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41186 deadline: 1732368225472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:45,474 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:45,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732368225472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:45,479 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:45,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41190 deadline: 1732368225476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:45,479 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:45,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41224 deadline: 1732368225477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:45,516 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/b2adff6bcb4946298d55f509d62eeaae as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/b2adff6bcb4946298d55f509d62eeaae 2024-11-23T13:22:45,521 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2d79dbed6dcd0da8c65ba796b67996f6/C of 2d79dbed6dcd0da8c65ba796b67996f6 into b2adff6bcb4946298d55f509d62eeaae(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
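[Editor's aside, not part of the captured log] The repeated RegionTooBusyException warnings above ("Over memstore limit=512.0 K") are raised by HRegion.checkResources once the region's memstore grows past its blocking size, which is the per-region flush size multiplied by the block multiplier. The sketch below only illustrates that relationship; the two configuration values are assumptions picked so their product matches the 512.0 K limit reported in the log, not values read from the test configuration.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical settings; blocking limit = flush size * block multiplier.
    long flushSize = 128L * 1024;  // hbase.hregion.memstore.flush.size
    int multiplier = 4;            // hbase.hregion.memstore.block.multiplier
    conf.setLong("hbase.hregion.memstore.flush.size", flushSize);
    conf.setInt("hbase.hregion.memstore.block.multiplier", multiplier);

    long blockingLimit = flushSize * multiplier; // 524288 bytes = 512.0 K
    System.out.println("Puts are rejected with RegionTooBusyException once the region"
        + " memstore exceeds " + blockingLimit + " bytes, until a flush catches up");
  }
}
```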
2024-11-23T13:22:45,521 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2d79dbed6dcd0da8c65ba796b67996f6: 2024-11-23T13:22:45,521 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6., storeName=2d79dbed6dcd0da8c65ba796b67996f6/C, priority=13, startTime=1732368164656; duration=0sec 2024-11-23T13:22:45,521 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:22:45,521 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2d79dbed6dcd0da8c65ba796b67996f6:C 2024-11-23T13:22:45,580 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=158 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/0ec595a152824f328145fc4a88ad20dc 2024-11-23T13:22:45,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/2b308601dfa44236b8fccaf7894e9887 is 50, key is test_row_0/C:col10/1732368163017/Put/seqid=0 2024-11-23T13:22:45,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742240_1416 (size=12151) 2024-11-23T13:22:45,778 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:45,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41254 deadline: 1732368225777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:45,779 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:45,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41186 deadline: 1732368225777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:45,779 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:45,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732368225777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:45,783 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:45,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41190 deadline: 1732368225780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:45,783 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:45,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41224 deadline: 1732368225782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:45,998 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=158 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/2b308601dfa44236b8fccaf7894e9887 2024-11-23T13:22:46,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/3f3c0a3b9eb745528cd7c02e013812d7 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/3f3c0a3b9eb745528cd7c02e013812d7 2024-11-23T13:22:46,005 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/3f3c0a3b9eb745528cd7c02e013812d7, entries=150, sequenceid=158, filesize=30.4 K 2024-11-23T13:22:46,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/0ec595a152824f328145fc4a88ad20dc as 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/0ec595a152824f328145fc4a88ad20dc 2024-11-23T13:22:46,011 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/0ec595a152824f328145fc4a88ad20dc, entries=150, sequenceid=158, filesize=11.9 K 2024-11-23T13:22:46,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/2b308601dfa44236b8fccaf7894e9887 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/2b308601dfa44236b8fccaf7894e9887 2024-11-23T13:22:46,016 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/2b308601dfa44236b8fccaf7894e9887, entries=150, sequenceid=158, filesize=11.9 K 2024-11-23T13:22:46,017 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 2d79dbed6dcd0da8c65ba796b67996f6 in 1271ms, sequenceid=158, compaction requested=false 2024-11-23T13:22:46,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2538): Flush status journal for 2d79dbed6dcd0da8c65ba796b67996f6: 2024-11-23T13:22:46,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 
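[Editor's aside, not part of the captured log] At this point the flush for pid=109 has committed its three store files (A, B, C) from .tmp into the region directories at sequenceid=158. The flush itself was driven by the master-side FlushTableProcedure (pid=108) with a FlushRegionProcedure child per region; a minimal, hypothetical client-side trigger for that kind of flush is sketched below using the public Admin API (the table name is taken from the log).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the master to flush every region of the table; the master runs a
      // flush procedure with one per-region child, as seen in this log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```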
2024-11-23T13:22:46,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=109 2024-11-23T13:22:46,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4106): Remote procedure done, pid=109 2024-11-23T13:22:46,019 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=109, resume processing ppid=108 2024-11-23T13:22:46,019 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, ppid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.1050 sec 2024-11-23T13:22:46,020 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees in 3.1090 sec 2024-11-23T13:22:46,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:46,283 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2d79dbed6dcd0da8c65ba796b67996f6 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-23T13:22:46,284 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d79dbed6dcd0da8c65ba796b67996f6, store=A 2024-11-23T13:22:46,284 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:46,284 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d79dbed6dcd0da8c65ba796b67996f6, store=B 2024-11-23T13:22:46,284 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:46,284 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d79dbed6dcd0da8c65ba796b67996f6, store=C 2024-11-23T13:22:46,284 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:46,290 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112349d92b7a4b2448bb9691a1430270fd7c_2d79dbed6dcd0da8c65ba796b67996f6 is 50, key is test_row_0/A:col10/1732368165165/Put/seqid=0 2024-11-23T13:22:46,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742241_1417 (size=12304) 2024-11-23T13:22:46,313 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:46,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41224 deadline: 1732368226307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:46,315 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:46,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41186 deadline: 1732368226308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:46,318 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:46,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41190 deadline: 1732368226312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:46,319 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:46,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41254 deadline: 1732368226313, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:46,321 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:46,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732368226313, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:46,417 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:46,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41224 deadline: 1732368226414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:46,418 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:46,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41186 deadline: 1732368226415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:46,423 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:46,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41190 deadline: 1732368226419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:46,423 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:46,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41254 deadline: 1732368226420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:46,423 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:46,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732368226422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:46,621 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:46,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41224 deadline: 1732368226618, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:46,621 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:46,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41186 deadline: 1732368226619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:46,627 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:46,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41190 deadline: 1732368226624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:46,628 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:46,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41254 deadline: 1732368226625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:46,628 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:46,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732368226625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:46,695 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:46,699 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112349d92b7a4b2448bb9691a1430270fd7c_2d79dbed6dcd0da8c65ba796b67996f6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112349d92b7a4b2448bb9691a1430270fd7c_2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:46,700 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/70ad7e3a881e4276a9d3f82fe619e71b, store: [table=TestAcidGuarantees family=A region=2d79dbed6dcd0da8c65ba796b67996f6] 2024-11-23T13:22:46,701 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/70ad7e3a881e4276a9d3f82fe619e71b is 175, key is test_row_0/A:col10/1732368165165/Put/seqid=0 2024-11-23T13:22:46,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742242_1418 (size=31105) 2024-11-23T13:22:46,705 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=174, 
memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/70ad7e3a881e4276a9d3f82fe619e71b 2024-11-23T13:22:46,712 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/11a216a30785493c8debc06c3ce20018 is 50, key is test_row_0/B:col10/1732368165165/Put/seqid=0 2024-11-23T13:22:46,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742243_1419 (size=12151) 2024-11-23T13:22:46,922 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:46,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41224 deadline: 1732368226922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:46,923 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:46,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41186 deadline: 1732368226922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:46,933 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:46,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41190 deadline: 1732368226929, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:46,933 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:46,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41254 deadline: 1732368226930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:46,935 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:46,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732368226931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:47,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-23T13:22:47,016 INFO [Thread-1675 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 108 completed 2024-11-23T13:22:47,018 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T13:22:47,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees 2024-11-23T13:22:47,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-23T13:22:47,019 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T13:22:47,020 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T13:22:47,020 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T13:22:47,117 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/11a216a30785493c8debc06c3ce20018 2024-11-23T13:22:47,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-23T13:22:47,125 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/8acabb0789ab43099ff49a7dc42c927a is 50, key is test_row_0/C:col10/1732368165165/Put/seqid=0 
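The repeated RegionTooBusyException warnings above are the region server refusing Mutate RPCs while region 2d79dbed6dcd0da8c65ba796b67996f6 sits over its 512.0 K blocking memstore limit; the stock HBase client retries such calls on its own, governed by hbase.client.pause and hbase.client.retries.number, so no test code needs to handle them explicitly. As a rough illustration only, a caller issuing raw puts could back off along these lines (connection setup, row contents, and the retry bounds are assumptions, and in practice the exception may arrive wrapped in a RetriesExhaustedWithDetailsException after the client's own retries):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutSketch {
  // Illustration only: retry a put with exponential backoff when the region
  // reports it is over its memstore limit; the HBase client normally does
  // this internally, so plain Table.put() is usually sufficient.
  static void putWithBackoff(Table table, Put put) throws IOException, InterruptedException {
    long pauseMs = 100;                              // hypothetical initial pause, doubled per attempt
    for (int attempt = 0; attempt < 5; attempt++) {  // hypothetical retry bound
      try {
        table.put(put);
        return;
      } catch (RegionTooBusyException e) {           // "Over memstore limit=512.0 K, ..."
        Thread.sleep(pauseMs);
        pauseMs *= 2;
      }
    }
    throw new IOException("region still too busy after retries");
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      putWithBackoff(table, put);
    }
  }
}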
2024-11-23T13:22:47,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742244_1420 (size=12151) 2024-11-23T13:22:47,133 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/8acabb0789ab43099ff49a7dc42c927a 2024-11-23T13:22:47,139 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/70ad7e3a881e4276a9d3f82fe619e71b as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/70ad7e3a881e4276a9d3f82fe619e71b 2024-11-23T13:22:47,145 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/70ad7e3a881e4276a9d3f82fe619e71b, entries=150, sequenceid=174, filesize=30.4 K 2024-11-23T13:22:47,145 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/11a216a30785493c8debc06c3ce20018 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/11a216a30785493c8debc06c3ce20018 2024-11-23T13:22:47,149 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/11a216a30785493c8debc06c3ce20018, entries=150, sequenceid=174, filesize=11.9 K 2024-11-23T13:22:47,151 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/8acabb0789ab43099ff49a7dc42c927a as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/8acabb0789ab43099ff49a7dc42c927a 2024-11-23T13:22:47,155 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/8acabb0789ab43099ff49a7dc42c927a, entries=150, sequenceid=174, filesize=11.9 K 2024-11-23T13:22:47,156 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 2d79dbed6dcd0da8c65ba796b67996f6 in 873ms, sequenceid=174, compaction requested=true 2024-11-23T13:22:47,156 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2d79dbed6dcd0da8c65ba796b67996f6: 2024-11-23T13:22:47,156 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 
blocking 2024-11-23T13:22:47,157 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2d79dbed6dcd0da8c65ba796b67996f6:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T13:22:47,157 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:22:47,157 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2d79dbed6dcd0da8c65ba796b67996f6:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T13:22:47,157 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:22:47,157 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2d79dbed6dcd0da8c65ba796b67996f6:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T13:22:47,157 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-23T13:22:47,157 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:22:47,158 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93573 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:22:47,158 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): 2d79dbed6dcd0da8c65ba796b67996f6/A is initiating minor compaction (all files) 2024-11-23T13:22:47,158 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36711 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:22:47,158 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2d79dbed6dcd0da8c65ba796b67996f6/A in TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:47,158 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 2d79dbed6dcd0da8c65ba796b67996f6/B is initiating minor compaction (all files) 2024-11-23T13:22:47,158 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2d79dbed6dcd0da8c65ba796b67996f6/B in TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 
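The "Exploring compaction algorithm has selected 3 files ... with 1 in ratio" entries above reflect the ratio test used when picking store files for a minor compaction: a candidate set is acceptable only if each file is no larger than the combined size of the other files multiplied by the compaction ratio (hbase.hstore.compaction.ratio, 1.2 by default). A simplified sketch of that check follows; it is not the actual ExploringCompactionPolicy code, and the individual file sizes in the example are a hypothetical split of the 93573-byte total reported above:

import java.util.List;

public class RatioCheckSketch {
  // Simplified form of the "files in ratio" test: every candidate store file
  // must be no larger than the sum of the other candidates' sizes times the
  // configured compaction ratio.
  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    if (fileSizes.size() < 2) {
      return true;
    }
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Hypothetical per-file split of the 93573-byte total logged for family A.
    List<Long> sizes = List.of(31265L, 31203L, 31105L);
    System.out.println(filesInRatio(sizes, 1.2));   // true, so all three files are compacted together
  }
}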
2024-11-23T13:22:47,158 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/895527800f0145ac9f4ef1acf2dfb2b9, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/3f3c0a3b9eb745528cd7c02e013812d7, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/70ad7e3a881e4276a9d3f82fe619e71b] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp, totalSize=91.4 K 2024-11-23T13:22:47,158 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/4e7ea3abf7814676816a9366ac202dcd, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/0ec595a152824f328145fc4a88ad20dc, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/11a216a30785493c8debc06c3ce20018] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp, totalSize=35.9 K 2024-11-23T13:22:47,158 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:47,158 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 
files: [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/895527800f0145ac9f4ef1acf2dfb2b9, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/3f3c0a3b9eb745528cd7c02e013812d7, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/70ad7e3a881e4276a9d3f82fe619e71b] 2024-11-23T13:22:47,159 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 4e7ea3abf7814676816a9366ac202dcd, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732368162371 2024-11-23T13:22:47,159 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 895527800f0145ac9f4ef1acf2dfb2b9, keycount=150, bloomtype=ROW, size=30.6 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732368162371 2024-11-23T13:22:47,159 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3f3c0a3b9eb745528cd7c02e013812d7, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1732368163017 2024-11-23T13:22:47,159 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 0ec595a152824f328145fc4a88ad20dc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1732368163017 2024-11-23T13:22:47,160 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 11a216a30785493c8debc06c3ce20018, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732368165160 2024-11-23T13:22:47,161 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 70ad7e3a881e4276a9d3f82fe619e71b, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732368165160 2024-11-23T13:22:47,171 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:47,171 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-23T13:22:47,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 
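The flush being executed here (pid=110 with subprocedure pid=111) was requested by the test client; the master logged it above as "Client=jenkins//172.17.0.2 flush TestAcidGuarantees", stored a FlushTableProcedure, and dispatched a FlushRegionProcedure that the FlushRegionCallable in the preceding entry is now running. From the client side this is a single Admin call; a minimal sketch, assuming default configuration and standard connection setup (neither is taken from the test code):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; in this log that
      // request surfaces as a FlushTableProcedure (pid=110) fanning out
      // FlushRegionProcedure subtasks (pid=111) to the region server.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}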
2024-11-23T13:22:47,172 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2837): Flushing 2d79dbed6dcd0da8c65ba796b67996f6 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-23T13:22:47,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d79dbed6dcd0da8c65ba796b67996f6, store=A 2024-11-23T13:22:47,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:47,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d79dbed6dcd0da8c65ba796b67996f6, store=B 2024-11-23T13:22:47,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:47,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d79dbed6dcd0da8c65ba796b67996f6, store=C 2024-11-23T13:22:47,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:47,173 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2d79dbed6dcd0da8c65ba796b67996f6#B#compaction#356 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:22:47,173 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/38385210c27e4071aabd10676c5b975f is 50, key is test_row_0/B:col10/1732368165165/Put/seqid=0 2024-11-23T13:22:47,174 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=2d79dbed6dcd0da8c65ba796b67996f6] 2024-11-23T13:22:47,177 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112330900455da0f42138d2f9701fc1dfb41_2d79dbed6dcd0da8c65ba796b67996f6 store=[table=TestAcidGuarantees family=A region=2d79dbed6dcd0da8c65ba796b67996f6] 2024-11-23T13:22:47,179 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112330900455da0f42138d2f9701fc1dfb41_2d79dbed6dcd0da8c65ba796b67996f6, store=[table=TestAcidGuarantees family=A region=2d79dbed6dcd0da8c65ba796b67996f6] 2024-11-23T13:22:47,179 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112330900455da0f42138d2f9701fc1dfb41_2d79dbed6dcd0da8c65ba796b67996f6 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=2d79dbed6dcd0da8c65ba796b67996f6] 2024-11-23T13:22:47,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742245_1421 (size=12561) 2024-11-23T13:22:47,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123314da1d0410d4e95a74984d4eaef882e_2d79dbed6dcd0da8c65ba796b67996f6 is 50, key is test_row_0/A:col10/1732368166311/Put/seqid=0 2024-11-23T13:22:47,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742246_1422 (size=4469) 2024-11-23T13:22:47,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742247_1423 (size=12304) 2024-11-23T13:22:47,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-23T13:22:47,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:47,429 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. as already flushing 2024-11-23T13:22:47,449 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:47,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41224 deadline: 1732368227442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:47,449 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:47,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41254 deadline: 1732368227442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:47,451 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:47,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41186 deadline: 1732368227443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:47,451 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:47,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732368227443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:47,452 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:47,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41190 deadline: 1732368227444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:47,553 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:47,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41224 deadline: 1732368227550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:47,554 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:47,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41254 deadline: 1732368227550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:47,555 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:47,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41186 deadline: 1732368227552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:47,555 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:47,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732368227552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:47,557 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:47,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41190 deadline: 1732368227553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:47,590 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/38385210c27e4071aabd10676c5b975f as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/38385210c27e4071aabd10676c5b975f 2024-11-23T13:22:47,594 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2d79dbed6dcd0da8c65ba796b67996f6/B of 2d79dbed6dcd0da8c65ba796b67996f6 into 38385210c27e4071aabd10676c5b975f(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T13:22:47,595 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2d79dbed6dcd0da8c65ba796b67996f6: 2024-11-23T13:22:47,595 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6., storeName=2d79dbed6dcd0da8c65ba796b67996f6/B, priority=13, startTime=1732368167157; duration=0sec 2024-11-23T13:22:47,595 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:22:47,595 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2d79dbed6dcd0da8c65ba796b67996f6:B 2024-11-23T13:22:47,595 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:22:47,596 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36711 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:22:47,596 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 2d79dbed6dcd0da8c65ba796b67996f6/C is initiating minor compaction (all files) 2024-11-23T13:22:47,596 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2d79dbed6dcd0da8c65ba796b67996f6/C in TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:47,596 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/b2adff6bcb4946298d55f509d62eeaae, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/2b308601dfa44236b8fccaf7894e9887, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/8acabb0789ab43099ff49a7dc42c927a] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp, totalSize=35.9 K 2024-11-23T13:22:47,596 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting b2adff6bcb4946298d55f509d62eeaae, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732368162371 2024-11-23T13:22:47,596 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 2b308601dfa44236b8fccaf7894e9887, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1732368163017 2024-11-23T13:22:47,597 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 8acabb0789ab43099ff49a7dc42c927a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732368165160 2024-11-23T13:22:47,602 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
2d79dbed6dcd0da8c65ba796b67996f6#A#compaction#357 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:22:47,603 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/277b54b472ed4a7bbb84d2a08ca9cc0c is 175, key is test_row_0/A:col10/1732368165165/Put/seqid=0 2024-11-23T13:22:47,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:47,613 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123314da1d0410d4e95a74984d4eaef882e_2d79dbed6dcd0da8c65ba796b67996f6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123314da1d0410d4e95a74984d4eaef882e_2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:47,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/f1f46229843143ea967becb9dbe51d8b, store: [table=TestAcidGuarantees family=A region=2d79dbed6dcd0da8c65ba796b67996f6] 2024-11-23T13:22:47,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/f1f46229843143ea967becb9dbe51d8b is 175, key is test_row_0/A:col10/1732368166311/Put/seqid=0 2024-11-23T13:22:47,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-23T13:22:47,623 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2d79dbed6dcd0da8c65ba796b67996f6#C#compaction#359 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:22:47,624 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/a12fc773b05b484d9d0692a417ac8f2f is 50, key is test_row_0/C:col10/1732368165165/Put/seqid=0 2024-11-23T13:22:47,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742248_1424 (size=31515) 2024-11-23T13:22:47,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742249_1425 (size=31105) 2024-11-23T13:22:47,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742250_1426 (size=12561) 2024-11-23T13:22:47,755 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:47,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41224 deadline: 1732368227755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:47,759 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:47,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41254 deadline: 1732368227755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:47,764 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:47,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41186 deadline: 1732368227756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:47,764 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:47,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732368227756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:47,764 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:47,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41190 deadline: 1732368227757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:48,027 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=198, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/f1f46229843143ea967becb9dbe51d8b 2024-11-23T13:22:48,031 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/277b54b472ed4a7bbb84d2a08ca9cc0c as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/277b54b472ed4a7bbb84d2a08ca9cc0c 2024-11-23T13:22:48,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/eb5eb683769c4c498a8942e685d0960f is 50, key is test_row_0/B:col10/1732368166311/Put/seqid=0 2024-11-23T13:22:48,039 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2d79dbed6dcd0da8c65ba796b67996f6/A of 2d79dbed6dcd0da8c65ba796b67996f6 into 277b54b472ed4a7bbb84d2a08ca9cc0c(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T13:22:48,039 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2d79dbed6dcd0da8c65ba796b67996f6: 2024-11-23T13:22:48,039 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6., storeName=2d79dbed6dcd0da8c65ba796b67996f6/A, priority=13, startTime=1732368167156; duration=0sec 2024-11-23T13:22:48,039 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:22:48,039 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2d79dbed6dcd0da8c65ba796b67996f6:A 2024-11-23T13:22:48,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742251_1427 (size=12151) 2024-11-23T13:22:48,040 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=198 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/eb5eb683769c4c498a8942e685d0960f 2024-11-23T13:22:48,041 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/a12fc773b05b484d9d0692a417ac8f2f as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/a12fc773b05b484d9d0692a417ac8f2f 2024-11-23T13:22:48,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/a7f0d074af874ce889e8cd9b8efbeaf8 is 50, key is test_row_0/C:col10/1732368166311/Put/seqid=0 2024-11-23T13:22:48,046 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2d79dbed6dcd0da8c65ba796b67996f6/C of 2d79dbed6dcd0da8c65ba796b67996f6 into a12fc773b05b484d9d0692a417ac8f2f(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T13:22:48,046 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2d79dbed6dcd0da8c65ba796b67996f6: 2024-11-23T13:22:48,046 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6., storeName=2d79dbed6dcd0da8c65ba796b67996f6/C, priority=13, startTime=1732368167157; duration=0sec 2024-11-23T13:22:48,046 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:22:48,046 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2d79dbed6dcd0da8c65ba796b67996f6:C 2024-11-23T13:22:48,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742252_1428 (size=12151) 2024-11-23T13:22:48,049 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=198 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/a7f0d074af874ce889e8cd9b8efbeaf8 2024-11-23T13:22:48,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/f1f46229843143ea967becb9dbe51d8b as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/f1f46229843143ea967becb9dbe51d8b 2024-11-23T13:22:48,056 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/f1f46229843143ea967becb9dbe51d8b, entries=150, sequenceid=198, filesize=30.4 K 2024-11-23T13:22:48,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/eb5eb683769c4c498a8942e685d0960f as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/eb5eb683769c4c498a8942e685d0960f 2024-11-23T13:22:48,061 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/eb5eb683769c4c498a8942e685d0960f, entries=150, sequenceid=198, filesize=11.9 K 2024-11-23T13:22:48,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/a7f0d074af874ce889e8cd9b8efbeaf8 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/a7f0d074af874ce889e8cd9b8efbeaf8 2024-11-23T13:22:48,063 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:48,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41224 deadline: 1732368228057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:48,063 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:48,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41254 deadline: 1732368228060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:48,065 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/a7f0d074af874ce889e8cd9b8efbeaf8, entries=150, sequenceid=198, filesize=11.9 K 2024-11-23T13:22:48,066 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 2d79dbed6dcd0da8c65ba796b67996f6 in 894ms, sequenceid=198, compaction requested=false 2024-11-23T13:22:48,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2538): Flush status journal for 2d79dbed6dcd0da8c65ba796b67996f6: 2024-11-23T13:22:48,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 
2024-11-23T13:22:48,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=111 2024-11-23T13:22:48,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4106): Remote procedure done, pid=111 2024-11-23T13:22:48,068 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=111, resume processing ppid=110 2024-11-23T13:22:48,068 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, ppid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0470 sec 2024-11-23T13:22:48,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:48,069 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2d79dbed6dcd0da8c65ba796b67996f6 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-23T13:22:48,070 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees in 1.0510 sec 2024-11-23T13:22:48,071 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d79dbed6dcd0da8c65ba796b67996f6, store=A 2024-11-23T13:22:48,071 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:48,071 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d79dbed6dcd0da8c65ba796b67996f6, store=B 2024-11-23T13:22:48,071 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:48,071 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d79dbed6dcd0da8c65ba796b67996f6, store=C 2024-11-23T13:22:48,071 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:48,081 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112357fdbdb7823d43f99550ea900f3d5bb8_2d79dbed6dcd0da8c65ba796b67996f6 is 50, key is test_row_0/A:col10/1732368167443/Put/seqid=0 2024-11-23T13:22:48,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742253_1429 (size=12304) 2024-11-23T13:22:48,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-23T13:22:48,122 INFO [Thread-1675 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 110 completed 2024-11-23T13:22:48,122 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:48,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732368228117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:48,123 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:48,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41186 deadline: 1732368228117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:48,123 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:48,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41190 deadline: 1732368228118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:48,124 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T13:22:48,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees 2024-11-23T13:22:48,125 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T13:22:48,126 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T13:22:48,126 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=113, ppid=112, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T13:22:48,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-23T13:22:48,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-23T13:22:48,227 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:48,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732368228224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:48,228 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:48,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41186 deadline: 1732368228224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:48,228 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:48,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41190 deadline: 1732368228225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:48,278 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:48,278 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-23T13:22:48,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 
2024-11-23T13:22:48,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. as already flushing 2024-11-23T13:22:48,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:48,279 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:48,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:22:48,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:48,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-23T13:22:48,431 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:48,431 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-23T13:22:48,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 
2024-11-23T13:22:48,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. as already flushing 2024-11-23T13:22:48,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:48,431 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:48,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:48,432 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:48,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:22:48,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41186 deadline: 1732368228429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:48,434 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:48,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732368228429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:48,434 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:48,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41190 deadline: 1732368228430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:48,494 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:48,498 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112357fdbdb7823d43f99550ea900f3d5bb8_2d79dbed6dcd0da8c65ba796b67996f6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112357fdbdb7823d43f99550ea900f3d5bb8_2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:48,499 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/9fd4cd28ef8444f186aa009125740542, store: [table=TestAcidGuarantees family=A region=2d79dbed6dcd0da8c65ba796b67996f6] 2024-11-23T13:22:48,499 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/9fd4cd28ef8444f186aa009125740542 is 175, key is test_row_0/A:col10/1732368167443/Put/seqid=0 2024-11-23T13:22:48,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742254_1430 (size=31105) 2024-11-23T13:22:48,569 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:48,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41224 deadline: 1732368228566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:48,571 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:48,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41254 deadline: 1732368228567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:48,584 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:48,584 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-23T13:22:48,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:48,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. as already flushing 2024-11-23T13:22:48,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:48,584 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:22:48,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:48,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:48,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-23T13:22:48,736 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:48,737 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-23T13:22:48,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:48,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. as already flushing 2024-11-23T13:22:48,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:48,737 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:48,737 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:48,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:48,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41186 deadline: 1732368228735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:48,738 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:48,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41190 deadline: 1732368228736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:48,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:22:48,740 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:48,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732368228737, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:48,889 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:48,890 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-23T13:22:48,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:48,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. as already flushing 2024-11-23T13:22:48,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:48,890 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:48,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:48,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:48,905 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=215, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/9fd4cd28ef8444f186aa009125740542 2024-11-23T13:22:48,912 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/66c9541cf95047588d650ea7309c52b1 is 50, key is test_row_0/B:col10/1732368167443/Put/seqid=0 2024-11-23T13:22:48,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742255_1431 (size=12151) 2024-11-23T13:22:49,042 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:49,043 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-23T13:22:49,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:49,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 
as already flushing 2024-11-23T13:22:49,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:49,043 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:49,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:49,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:49,195 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:49,195 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-23T13:22:49,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:49,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. as already flushing 2024-11-23T13:22:49,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:49,196 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:49,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:49,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:49,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-23T13:22:49,244 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:49,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41186 deadline: 1732368229240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:49,244 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:49,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732368229242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:49,245 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:49,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41190 deadline: 1732368229243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:49,316 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/66c9541cf95047588d650ea7309c52b1 2024-11-23T13:22:49,325 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/38e6a7b901ea4d96865ff97c5a0b80c4 is 50, key is test_row_0/C:col10/1732368167443/Put/seqid=0 2024-11-23T13:22:49,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742256_1432 (size=12151) 2024-11-23T13:22:49,348 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:49,348 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-23T13:22:49,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:49,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. as already flushing 2024-11-23T13:22:49,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 
2024-11-23T13:22:49,348 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:49,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:49,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:49,500 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:49,501 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-23T13:22:49,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:49,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. as already flushing 2024-11-23T13:22:49,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:49,501 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:49,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:49,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:49,576 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:49,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41224 deadline: 1732368229574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:49,584 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:49,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41254 deadline: 1732368229580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:49,653 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:49,654 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-23T13:22:49,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 
2024-11-23T13:22:49,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. as already flushing 2024-11-23T13:22:49,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:49,654 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:22:49,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:22:49,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:22:49,730 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/38e6a7b901ea4d96865ff97c5a0b80c4 2024-11-23T13:22:49,734 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/9fd4cd28ef8444f186aa009125740542 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/9fd4cd28ef8444f186aa009125740542 2024-11-23T13:22:49,738 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/9fd4cd28ef8444f186aa009125740542, entries=150, sequenceid=215, filesize=30.4 K 2024-11-23T13:22:49,738 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/66c9541cf95047588d650ea7309c52b1 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/66c9541cf95047588d650ea7309c52b1 2024-11-23T13:22:49,741 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/66c9541cf95047588d650ea7309c52b1, entries=150, sequenceid=215, filesize=11.9 K 2024-11-23T13:22:49,742 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/38e6a7b901ea4d96865ff97c5a0b80c4 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/38e6a7b901ea4d96865ff97c5a0b80c4 2024-11-23T13:22:49,745 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/38e6a7b901ea4d96865ff97c5a0b80c4, entries=150, sequenceid=215, filesize=11.9 K 2024-11-23T13:22:49,746 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 2d79dbed6dcd0da8c65ba796b67996f6 in 1677ms, sequenceid=215, compaction requested=true 2024-11-23T13:22:49,746 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2d79dbed6dcd0da8c65ba796b67996f6: 2024-11-23T13:22:49,746 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2d79dbed6dcd0da8c65ba796b67996f6:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T13:22:49,746 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:22:49,746 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:22:49,746 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2d79dbed6dcd0da8c65ba796b67996f6:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T13:22:49,746 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:22:49,747 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2d79dbed6dcd0da8c65ba796b67996f6:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T13:22:49,747 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:22:49,747 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:22:49,747 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93725 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:22:49,747 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:22:49,747 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): 2d79dbed6dcd0da8c65ba796b67996f6/A is initiating minor compaction (all files) 2024-11-23T13:22:49,747 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 2d79dbed6dcd0da8c65ba796b67996f6/B is initiating minor compaction (all files) 2024-11-23T13:22:49,747 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2d79dbed6dcd0da8c65ba796b67996f6/B in TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:49,747 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2d79dbed6dcd0da8c65ba796b67996f6/A in TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 
2024-11-23T13:22:49,748 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/38385210c27e4071aabd10676c5b975f, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/eb5eb683769c4c498a8942e685d0960f, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/66c9541cf95047588d650ea7309c52b1] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp, totalSize=36.0 K 2024-11-23T13:22:49,748 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/277b54b472ed4a7bbb84d2a08ca9cc0c, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/f1f46229843143ea967becb9dbe51d8b, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/9fd4cd28ef8444f186aa009125740542] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp, totalSize=91.5 K 2024-11-23T13:22:49,748 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:49,748 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 
files: [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/277b54b472ed4a7bbb84d2a08ca9cc0c, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/f1f46229843143ea967becb9dbe51d8b, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/9fd4cd28ef8444f186aa009125740542] 2024-11-23T13:22:49,748 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 38385210c27e4071aabd10676c5b975f, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732368165160 2024-11-23T13:22:49,748 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 277b54b472ed4a7bbb84d2a08ca9cc0c, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732368165160 2024-11-23T13:22:49,748 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting eb5eb683769c4c498a8942e685d0960f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1732368166301 2024-11-23T13:22:49,748 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 66c9541cf95047588d650ea7309c52b1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1732368167443 2024-11-23T13:22:49,748 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting f1f46229843143ea967becb9dbe51d8b, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1732368166301 2024-11-23T13:22:49,749 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9fd4cd28ef8444f186aa009125740542, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1732368167443 2024-11-23T13:22:49,754 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=2d79dbed6dcd0da8c65ba796b67996f6] 2024-11-23T13:22:49,756 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241123b93e7c6b818c445686beed3c0d202f19_2d79dbed6dcd0da8c65ba796b67996f6 store=[table=TestAcidGuarantees family=A region=2d79dbed6dcd0da8c65ba796b67996f6] 2024-11-23T13:22:49,756 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2d79dbed6dcd0da8c65ba796b67996f6#B#compaction#366 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:22:49,757 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/7e1de1f2a33040928bb5898f1c960f6a is 50, key is test_row_0/B:col10/1732368167443/Put/seqid=0 2024-11-23T13:22:49,757 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241123b93e7c6b818c445686beed3c0d202f19_2d79dbed6dcd0da8c65ba796b67996f6, store=[table=TestAcidGuarantees family=A region=2d79dbed6dcd0da8c65ba796b67996f6] 2024-11-23T13:22:49,758 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123b93e7c6b818c445686beed3c0d202f19_2d79dbed6dcd0da8c65ba796b67996f6 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=2d79dbed6dcd0da8c65ba796b67996f6] 2024-11-23T13:22:49,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742257_1433 (size=12663) 2024-11-23T13:22:49,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742258_1434 (size=4469) 2024-11-23T13:22:49,806 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:49,806 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-23T13:22:49,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 
2024-11-23T13:22:49,806 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2837): Flushing 2d79dbed6dcd0da8c65ba796b67996f6 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-23T13:22:49,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d79dbed6dcd0da8c65ba796b67996f6, store=A 2024-11-23T13:22:49,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:49,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d79dbed6dcd0da8c65ba796b67996f6, store=B 2024-11-23T13:22:49,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:49,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d79dbed6dcd0da8c65ba796b67996f6, store=C 2024-11-23T13:22:49,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:49,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123ea30491b4a764b20ada23c93d9f3b596_2d79dbed6dcd0da8c65ba796b67996f6 is 50, key is test_row_0/A:col10/1732368168104/Put/seqid=0 2024-11-23T13:22:49,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742259_1435 (size=12304) 2024-11-23T13:22:49,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:49,818 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123ea30491b4a764b20ada23c93d9f3b596_2d79dbed6dcd0da8c65ba796b67996f6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123ea30491b4a764b20ada23c93d9f3b596_2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:49,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/cb1238c28069420ba3d79f80a4221cc1, store: [table=TestAcidGuarantees family=A region=2d79dbed6dcd0da8c65ba796b67996f6] 2024-11-23T13:22:49,820 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/cb1238c28069420ba3d79f80a4221cc1 is 175, key is test_row_0/A:col10/1732368168104/Put/seqid=0 2024-11-23T13:22:49,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742260_1436 (size=31105) 2024-11-23T13:22:49,824 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=237, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/cb1238c28069420ba3d79f80a4221cc1 2024-11-23T13:22:49,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/a560e47744eb434691c9561f76c9ed4d is 50, key is test_row_0/B:col10/1732368168104/Put/seqid=0 2024-11-23T13:22:49,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742261_1437 (size=12151) 2024-11-23T13:22:50,162 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2d79dbed6dcd0da8c65ba796b67996f6#A#compaction#365 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:22:50,163 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/1df5000a418448939df98171fb2d9dba is 175, key is test_row_0/A:col10/1732368167443/Put/seqid=0 2024-11-23T13:22:50,166 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/7e1de1f2a33040928bb5898f1c960f6a as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/7e1de1f2a33040928bb5898f1c960f6a 2024-11-23T13:22:50,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742262_1438 (size=31617) 2024-11-23T13:22:50,170 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2d79dbed6dcd0da8c65ba796b67996f6/B of 2d79dbed6dcd0da8c65ba796b67996f6 into 7e1de1f2a33040928bb5898f1c960f6a(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T13:22:50,170 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2d79dbed6dcd0da8c65ba796b67996f6: 2024-11-23T13:22:50,170 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6., storeName=2d79dbed6dcd0da8c65ba796b67996f6/B, priority=13, startTime=1732368169746; duration=0sec 2024-11-23T13:22:50,170 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:22:50,170 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2d79dbed6dcd0da8c65ba796b67996f6:B 2024-11-23T13:22:50,170 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:22:50,171 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:22:50,172 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 2d79dbed6dcd0da8c65ba796b67996f6/C is initiating minor compaction (all files) 2024-11-23T13:22:50,172 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2d79dbed6dcd0da8c65ba796b67996f6/C in TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:50,172 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/a12fc773b05b484d9d0692a417ac8f2f, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/a7f0d074af874ce889e8cd9b8efbeaf8, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/38e6a7b901ea4d96865ff97c5a0b80c4] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp, totalSize=36.0 K 2024-11-23T13:22:50,172 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting a12fc773b05b484d9d0692a417ac8f2f, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732368165160 2024-11-23T13:22:50,172 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting a7f0d074af874ce889e8cd9b8efbeaf8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1732368166301 2024-11-23T13:22:50,172 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/1df5000a418448939df98171fb2d9dba as 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/1df5000a418448939df98171fb2d9dba 2024-11-23T13:22:50,173 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 38e6a7b901ea4d96865ff97c5a0b80c4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1732368167443 2024-11-23T13:22:50,176 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2d79dbed6dcd0da8c65ba796b67996f6/A of 2d79dbed6dcd0da8c65ba796b67996f6 into 1df5000a418448939df98171fb2d9dba(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:22:50,176 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2d79dbed6dcd0da8c65ba796b67996f6: 2024-11-23T13:22:50,176 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6., storeName=2d79dbed6dcd0da8c65ba796b67996f6/A, priority=13, startTime=1732368169746; duration=0sec 2024-11-23T13:22:50,177 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:22:50,177 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2d79dbed6dcd0da8c65ba796b67996f6:A 2024-11-23T13:22:50,179 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2d79dbed6dcd0da8c65ba796b67996f6#C#compaction#369 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:22:50,180 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/01bb6d2bd512483ba5a6db57acc9e661 is 50, key is test_row_0/C:col10/1732368167443/Put/seqid=0 2024-11-23T13:22:50,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742263_1439 (size=12663) 2024-11-23T13:22:50,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-23T13:22:50,248 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/a560e47744eb434691c9561f76c9ed4d 2024-11-23T13:22:50,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/9d90876c9da047a3b57f32419dbad86e is 50, key is test_row_0/C:col10/1732368168104/Put/seqid=0 2024-11-23T13:22:50,255 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 
as already flushing 2024-11-23T13:22:50,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:50,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742264_1440 (size=12151) 2024-11-23T13:22:50,259 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/9d90876c9da047a3b57f32419dbad86e 2024-11-23T13:22:50,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/cb1238c28069420ba3d79f80a4221cc1 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/cb1238c28069420ba3d79f80a4221cc1 2024-11-23T13:22:50,267 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/cb1238c28069420ba3d79f80a4221cc1, entries=150, sequenceid=237, filesize=30.4 K 2024-11-23T13:22:50,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:50,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/a560e47744eb434691c9561f76c9ed4d as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/a560e47744eb434691c9561f76c9ed4d 2024-11-23T13:22:50,272 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/a560e47744eb434691c9561f76c9ed4d, entries=150, sequenceid=237, filesize=11.9 K 2024-11-23T13:22:50,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:50,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:50,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, 
pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/9d90876c9da047a3b57f32419dbad86e as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/9d90876c9da047a3b57f32419dbad86e 2024-11-23T13:22:50,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:50,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:50,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:50,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:50,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:50,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:50,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:50,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:50,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:50,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:50,279 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/9d90876c9da047a3b57f32419dbad86e, entries=150, sequenceid=237, filesize=11.9 K 2024-11-23T13:22:50,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:50,280 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 2d79dbed6dcd0da8c65ba796b67996f6 in 474ms, sequenceid=237, compaction requested=false 2024-11-23T13:22:50,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:50,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2538): Flush status journal for 2d79dbed6dcd0da8c65ba796b67996f6: 2024-11-23T13:22:50,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:50,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=113 2024-11-23T13:22:50,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4106): Remote procedure done, pid=113 2024-11-23T13:22:50,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:50,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:50,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:50,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:50,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:50,283 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=113, resume processing ppid=112 2024-11-23T13:22:50,283 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=113, ppid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1550 sec 2024-11-23T13:22:50,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:50,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:50,283 INFO 
[MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 2d79dbed6dcd0da8c65ba796b67996f6 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-23T13:22:50,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:50,284 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d79dbed6dcd0da8c65ba796b67996f6, store=A 2024-11-23T13:22:50,285 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:50,285 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d79dbed6dcd0da8c65ba796b67996f6, store=B 2024-11-23T13:22:50,285 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:50,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:50,285 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d79dbed6dcd0da8c65ba796b67996f6, store=C 2024-11-23T13:22:50,285 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:50,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:50,286 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees in 2.1600 sec 2024-11-23T13:22:50,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:50,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:50,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:50,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:50,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:50,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T13:22:50,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:50,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:50,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:50,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:50,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:50,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:50,293 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123e32cf7a32d1c4084ba52feb40dd8827d_2d79dbed6dcd0da8c65ba796b67996f6 is 50, key is test_row_0/A:col10/1732368170277/Put/seqid=0 2024-11-23T13:22:50,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:50,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:50,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:50,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:50,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742265_1441 (size=24758) 2024-11-23T13:22:50,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:50,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:50,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:50,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:50,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:50,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:50,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:50,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:50,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:50,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:50,341 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:50,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41190 deadline: 1732368230335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:50,341 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:50,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41186 deadline: 1732368230336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:50,346 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:50,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732368230339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:50,446 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:50,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41190 deadline: 1732368230442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:50,446 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:50,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41186 deadline: 1732368230443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:50,452 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:50,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732368230448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:50,589 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/01bb6d2bd512483ba5a6db57acc9e661 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/01bb6d2bd512483ba5a6db57acc9e661 2024-11-23T13:22:50,594 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2d79dbed6dcd0da8c65ba796b67996f6/C of 2d79dbed6dcd0da8c65ba796b67996f6 into 01bb6d2bd512483ba5a6db57acc9e661(size=12.4 K), total size for store is 24.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:22:50,594 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2d79dbed6dcd0da8c65ba796b67996f6: 2024-11-23T13:22:50,594 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6., storeName=2d79dbed6dcd0da8c65ba796b67996f6/C, priority=13, startTime=1732368169746; duration=0sec 2024-11-23T13:22:50,594 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:22:50,594 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2d79dbed6dcd0da8c65ba796b67996f6:C 2024-11-23T13:22:50,649 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:50,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41190 deadline: 1732368230647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:50,654 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:50,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41186 deadline: 1732368230648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:50,657 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:50,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732368230654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:50,699 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:50,703 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123e32cf7a32d1c4084ba52feb40dd8827d_2d79dbed6dcd0da8c65ba796b67996f6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123e32cf7a32d1c4084ba52feb40dd8827d_2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:50,703 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/634bf5e20cab46019dfb079042c1fac7, store: [table=TestAcidGuarantees family=A region=2d79dbed6dcd0da8c65ba796b67996f6] 2024-11-23T13:22:50,704 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/634bf5e20cab46019dfb079042c1fac7 is 175, key is test_row_0/A:col10/1732368170277/Put/seqid=0 2024-11-23T13:22:50,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742266_1442 (size=74395) 2024-11-23T13:22:50,953 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:50,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41190 deadline: 1732368230951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:50,957 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:50,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41186 deadline: 1732368230956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:50,962 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:50,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732368230960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:51,107 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=255, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/634bf5e20cab46019dfb079042c1fac7 2024-11-23T13:22:51,116 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/c3832f5fcc234ef6bb64344650537445 is 50, key is test_row_0/B:col10/1732368170277/Put/seqid=0 2024-11-23T13:22:51,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742267_1443 (size=12151) 2024-11-23T13:22:51,460 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:51,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41190 deadline: 1732368231457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:51,462 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:51,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41186 deadline: 1732368231459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:51,467 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:51,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732368231465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:51,520 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=255 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/c3832f5fcc234ef6bb64344650537445 2024-11-23T13:22:51,530 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/579207da89c44e698b3d64bdb0ad3180 is 50, key is test_row_0/C:col10/1732368170277/Put/seqid=0 2024-11-23T13:22:51,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742268_1444 (size=12151) 2024-11-23T13:22:51,585 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:51,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41224 deadline: 1732368231582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:51,586 DEBUG [Thread-1673 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4143 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6., hostname=ba2e440802a7,33173,1732368061317, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T13:22:51,604 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:51,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41254 deadline: 1732368231601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:51,605 DEBUG [Thread-1671 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4162 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6., hostname=ba2e440802a7,33173,1732368061317, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T13:22:51,934 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=255 (bloomFilter=true), 
to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/579207da89c44e698b3d64bdb0ad3180 2024-11-23T13:22:51,938 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/634bf5e20cab46019dfb079042c1fac7 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/634bf5e20cab46019dfb079042c1fac7 2024-11-23T13:22:51,941 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/634bf5e20cab46019dfb079042c1fac7, entries=400, sequenceid=255, filesize=72.7 K 2024-11-23T13:22:51,942 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/c3832f5fcc234ef6bb64344650537445 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/c3832f5fcc234ef6bb64344650537445 2024-11-23T13:22:51,946 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/c3832f5fcc234ef6bb64344650537445, entries=150, sequenceid=255, filesize=11.9 K 2024-11-23T13:22:51,946 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/579207da89c44e698b3d64bdb0ad3180 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/579207da89c44e698b3d64bdb0ad3180 2024-11-23T13:22:51,950 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/579207da89c44e698b3d64bdb0ad3180, entries=150, sequenceid=255, filesize=11.9 K 2024-11-23T13:22:51,951 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 2d79dbed6dcd0da8c65ba796b67996f6 in 1668ms, sequenceid=255, compaction requested=true 2024-11-23T13:22:51,951 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 2d79dbed6dcd0da8c65ba796b67996f6: 2024-11-23T13:22:51,951 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:22:51,952 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2d79dbed6dcd0da8c65ba796b67996f6:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T13:22:51,952 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; 
Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:22:51,952 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:22:51,952 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2d79dbed6dcd0da8c65ba796b67996f6:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T13:22:51,952 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:22:51,952 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 2d79dbed6dcd0da8c65ba796b67996f6:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T13:22:51,952 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:22:51,957 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 137117 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:22:51,957 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): 2d79dbed6dcd0da8c65ba796b67996f6/A is initiating minor compaction (all files) 2024-11-23T13:22:51,957 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2d79dbed6dcd0da8c65ba796b67996f6/A in TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:51,957 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/1df5000a418448939df98171fb2d9dba, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/cb1238c28069420ba3d79f80a4221cc1, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/634bf5e20cab46019dfb079042c1fac7] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp, totalSize=133.9 K 2024-11-23T13:22:51,957 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:51,957 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 
files: [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/1df5000a418448939df98171fb2d9dba, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/cb1238c28069420ba3d79f80a4221cc1, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/634bf5e20cab46019dfb079042c1fac7] 2024-11-23T13:22:51,958 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1df5000a418448939df98171fb2d9dba, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1732368167443 2024-11-23T13:22:51,958 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:22:51,958 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting cb1238c28069420ba3d79f80a4221cc1, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732368168104 2024-11-23T13:22:51,958 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 2d79dbed6dcd0da8c65ba796b67996f6/B is initiating minor compaction (all files) 2024-11-23T13:22:51,958 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2d79dbed6dcd0da8c65ba796b67996f6/B in TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:51,958 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/7e1de1f2a33040928bb5898f1c960f6a, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/a560e47744eb434691c9561f76c9ed4d, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/c3832f5fcc234ef6bb64344650537445] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp, totalSize=36.1 K 2024-11-23T13:22:51,959 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 7e1de1f2a33040928bb5898f1c960f6a, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1732368167443 2024-11-23T13:22:51,959 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 634bf5e20cab46019dfb079042c1fac7, keycount=400, bloomtype=ROW, size=72.7 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1732368170263 2024-11-23T13:22:51,959 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting a560e47744eb434691c9561f76c9ed4d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732368168104 2024-11-23T13:22:51,960 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 
c3832f5fcc234ef6bb64344650537445, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1732368170277 2024-11-23T13:22:51,968 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=2d79dbed6dcd0da8c65ba796b67996f6] 2024-11-23T13:22:51,968 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2d79dbed6dcd0da8c65ba796b67996f6#B#compaction#374 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:22:51,969 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/4d0cee8fe59d447f9e1b1a89f5684622 is 50, key is test_row_0/B:col10/1732368170277/Put/seqid=0 2024-11-23T13:22:51,970 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411230fcbe15c515a43fabc1ac61fc87ed739_2d79dbed6dcd0da8c65ba796b67996f6 store=[table=TestAcidGuarantees family=A region=2d79dbed6dcd0da8c65ba796b67996f6] 2024-11-23T13:22:51,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742269_1445 (size=12765) 2024-11-23T13:22:51,973 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411230fcbe15c515a43fabc1ac61fc87ed739_2d79dbed6dcd0da8c65ba796b67996f6, store=[table=TestAcidGuarantees family=A region=2d79dbed6dcd0da8c65ba796b67996f6] 2024-11-23T13:22:51,973 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411230fcbe15c515a43fabc1ac61fc87ed739_2d79dbed6dcd0da8c65ba796b67996f6 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=2d79dbed6dcd0da8c65ba796b67996f6] 2024-11-23T13:22:51,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742270_1446 (size=4469) 2024-11-23T13:22:51,978 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2d79dbed6dcd0da8c65ba796b67996f6#A#compaction#375 average throughput is 2.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:22:51,979 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/319e274c01c14783b0e3f9ad976c6e82 is 175, key is test_row_0/A:col10/1732368170277/Put/seqid=0 2024-11-23T13:22:51,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742271_1447 (size=31719) 2024-11-23T13:22:52,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-23T13:22:52,231 INFO [Thread-1675 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 112 completed 2024-11-23T13:22:52,232 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T13:22:52,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees 2024-11-23T13:22:52,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-23T13:22:52,234 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T13:22:52,234 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T13:22:52,235 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T13:22:52,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-23T13:22:52,377 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/4d0cee8fe59d447f9e1b1a89f5684622 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/4d0cee8fe59d447f9e1b1a89f5684622 2024-11-23T13:22:52,382 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2d79dbed6dcd0da8c65ba796b67996f6/B of 2d79dbed6dcd0da8c65ba796b67996f6 into 4d0cee8fe59d447f9e1b1a89f5684622(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
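The FLUSH table operation recorded just above (procId 112 completed, then pid=114 stored and its FlushRegionProcedure subprocedure initialized) is driven by a client-side Admin call against the master. A minimal sketch of how such a flush is requested, assuming the standard HBase 2.x client API; the test drives this through its own tooling, so the class and names below are purely illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // The master turns this call into a FlushTableProcedure (like pid=114
                // above) and fans out FlushRegionProcedure subprocedures to the
                // region servers holding the table's regions.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }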
2024-11-23T13:22:52,382 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2d79dbed6dcd0da8c65ba796b67996f6: 2024-11-23T13:22:52,382 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6., storeName=2d79dbed6dcd0da8c65ba796b67996f6/B, priority=13, startTime=1732368171952; duration=0sec 2024-11-23T13:22:52,382 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:22:52,382 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2d79dbed6dcd0da8c65ba796b67996f6:B 2024-11-23T13:22:52,382 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:22:52,383 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:22:52,383 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 2d79dbed6dcd0da8c65ba796b67996f6/C is initiating minor compaction (all files) 2024-11-23T13:22:52,383 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 2d79dbed6dcd0da8c65ba796b67996f6/C in TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:52,383 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/01bb6d2bd512483ba5a6db57acc9e661, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/9d90876c9da047a3b57f32419dbad86e, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/579207da89c44e698b3d64bdb0ad3180] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp, totalSize=36.1 K 2024-11-23T13:22:52,384 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 01bb6d2bd512483ba5a6db57acc9e661, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1732368167443 2024-11-23T13:22:52,384 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 9d90876c9da047a3b57f32419dbad86e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1732368168104 2024-11-23T13:22:52,385 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 579207da89c44e698b3d64bdb0ad3180, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1732368170277 2024-11-23T13:22:52,387 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/319e274c01c14783b0e3f9ad976c6e82 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/319e274c01c14783b0e3f9ad976c6e82 2024-11-23T13:22:52,389 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:52,389 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-23T13:22:52,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:52,389 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2837): Flushing 2d79dbed6dcd0da8c65ba796b67996f6 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-23T13:22:52,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d79dbed6dcd0da8c65ba796b67996f6, store=A 2024-11-23T13:22:52,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:52,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d79dbed6dcd0da8c65ba796b67996f6, store=B 2024-11-23T13:22:52,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:52,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d79dbed6dcd0da8c65ba796b67996f6, store=C 2024-11-23T13:22:52,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:52,391 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2d79dbed6dcd0da8c65ba796b67996f6/A of 2d79dbed6dcd0da8c65ba796b67996f6 into 319e274c01c14783b0e3f9ad976c6e82(size=31.0 K), total size for store is 31.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
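The A-store minor compaction just completed (B finished a few entries earlier) was selected automatically by ExploringCompactionPolicy once the flush left three eligible store files; compactions can also be requested explicitly through the Admin API. A minimal sketch under that assumption, using standard HBase 2.x Admin calls; the helper class is illustrative and not part of the test:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CompactExample {
        // Request a minor compaction of the A family and a major compaction of the
        // whole table. Both calls are asynchronous: they only enqueue work on the
        // region servers' CompactSplit threads, like the queue visible in this log.
        static void requestCompactions(Admin admin) throws IOException {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            admin.compact(table, Bytes.toBytes("A"));
            admin.majorCompact(table);
        }
    }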
2024-11-23T13:22:52,391 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2d79dbed6dcd0da8c65ba796b67996f6: 2024-11-23T13:22:52,391 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6., storeName=2d79dbed6dcd0da8c65ba796b67996f6/A, priority=13, startTime=1732368171951; duration=0sec 2024-11-23T13:22:52,392 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:22:52,392 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2d79dbed6dcd0da8c65ba796b67996f6:A 2024-11-23T13:22:52,393 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2d79dbed6dcd0da8c65ba796b67996f6#C#compaction#376 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:22:52,393 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/450d300bb8644c51a2ef68a040a23742 is 50, key is test_row_0/C:col10/1732368170277/Put/seqid=0 2024-11-23T13:22:52,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123a46a0cb234234c68968a5c2059f80f86_2d79dbed6dcd0da8c65ba796b67996f6 is 50, key is test_row_0/A:col10/1732368170337/Put/seqid=0 2024-11-23T13:22:52,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742272_1448 (size=12765) 2024-11-23T13:22:52,412 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/450d300bb8644c51a2ef68a040a23742 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/450d300bb8644c51a2ef68a040a23742 2024-11-23T13:22:52,417 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 2d79dbed6dcd0da8c65ba796b67996f6/C of 2d79dbed6dcd0da8c65ba796b67996f6 into 450d300bb8644c51a2ef68a040a23742(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
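The recurring "Over memstore limit=512.0 K" RegionTooBusyException warnings in this log come from HRegion.checkResources: once a region's memstore grows past hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier (typically 4), new mutations are rejected until the flusher catches up, and the client's RpcRetryingCallerImpl keeps retrying with increasing pauses (tries=6, retries=16 in the entries above). A minimal client-side sketch of the write path and the retry knobs involved, assuming the standard HBase 2.x client API; the values and names are illustrative, not taken from the test:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionWriteExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Client retry budget; the retrying caller honours these when the server
            // answers a Mutate with RegionTooBusyException.
            conf.setInt("hbase.client.retries.number", 16);
            conf.setLong("hbase.client.pause", 100); // base pause in ms, scaled per attempt

            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                try {
                    table.put(put);
                } catch (IOException e) {
                    // Retries exhausted while the region stayed over its blocking limit
                    // (usually surfaced as a RetriesExhaustedException caused by
                    // RegionTooBusyException); back off and let the flush drain the memstore.
                    System.err.println("write backed off: " + e);
                }
            }
        }
    }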
2024-11-23T13:22:52,417 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 2d79dbed6dcd0da8c65ba796b67996f6: 2024-11-23T13:22:52,417 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6., storeName=2d79dbed6dcd0da8c65ba796b67996f6/C, priority=13, startTime=1732368171952; duration=0sec 2024-11-23T13:22:52,417 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:22:52,417 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2d79dbed6dcd0da8c65ba796b67996f6:C 2024-11-23T13:22:52,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742273_1449 (size=12454) 2024-11-23T13:22:52,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:52,443 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123a46a0cb234234c68968a5c2059f80f86_2d79dbed6dcd0da8c65ba796b67996f6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123a46a0cb234234c68968a5c2059f80f86_2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:52,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/816fcf290262419ebc94d4f51573f5d0, store: [table=TestAcidGuarantees family=A region=2d79dbed6dcd0da8c65ba796b67996f6] 2024-11-23T13:22:52,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/816fcf290262419ebc94d4f51573f5d0 is 175, key is test_row_0/A:col10/1732368170337/Put/seqid=0 2024-11-23T13:22:52,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742274_1450 (size=31255) 2024-11-23T13:22:52,468 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 
as already flushing 2024-11-23T13:22:52,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:52,498 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:52,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41190 deadline: 1732368232490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:52,498 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:52,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41186 deadline: 1732368232491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:52,501 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:52,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732368232494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:52,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-23T13:22:52,603 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:52,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41190 deadline: 1732368232599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:52,604 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:52,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41186 deadline: 1732368232599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:52,604 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:52,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732368232602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:52,805 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:52,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41190 deadline: 1732368232804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:52,808 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:52,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732368232806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:52,810 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:52,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41186 deadline: 1732368232806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:52,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-23T13:22:52,849 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=277, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/816fcf290262419ebc94d4f51573f5d0 2024-11-23T13:22:52,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/36c4e3f199d146b389f672eae6f9911b is 50, key is test_row_0/B:col10/1732368170337/Put/seqid=0 2024-11-23T13:22:52,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742275_1451 (size=12301) 2024-11-23T13:22:53,111 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:53,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41190 deadline: 1732368233108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:53,112 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:53,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732368233110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:53,115 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:53,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41186 deadline: 1732368233112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:53,262 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=277 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/36c4e3f199d146b389f672eae6f9911b 2024-11-23T13:22:53,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/9e92e44d4f7149d0aa3dd15e608218fb is 50, key is test_row_0/C:col10/1732368170337/Put/seqid=0 2024-11-23T13:22:53,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742276_1452 (size=12301) 2024-11-23T13:22:53,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-23T13:22:53,617 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:53,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41240 deadline: 1732368233614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:53,618 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:53,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41190 deadline: 1732368233616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:53,622 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:22:53,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41186 deadline: 1732368233620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:53,680 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=277 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/9e92e44d4f7149d0aa3dd15e608218fb 2024-11-23T13:22:53,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/816fcf290262419ebc94d4f51573f5d0 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/816fcf290262419ebc94d4f51573f5d0 2024-11-23T13:22:53,688 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/816fcf290262419ebc94d4f51573f5d0, entries=150, sequenceid=277, filesize=30.5 K 2024-11-23T13:22:53,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/36c4e3f199d146b389f672eae6f9911b as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/36c4e3f199d146b389f672eae6f9911b 2024-11-23T13:22:53,693 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/36c4e3f199d146b389f672eae6f9911b, entries=150, sequenceid=277, filesize=12.0 K 2024-11-23T13:22:53,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/9e92e44d4f7149d0aa3dd15e608218fb as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/9e92e44d4f7149d0aa3dd15e608218fb 2024-11-23T13:22:53,696 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/9e92e44d4f7149d0aa3dd15e608218fb, entries=150, sequenceid=277, filesize=12.0 K 2024-11-23T13:22:53,697 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 2d79dbed6dcd0da8c65ba796b67996f6 in 1308ms, sequenceid=277, compaction requested=false 2024-11-23T13:22:53,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2538): Flush status journal for 2d79dbed6dcd0da8c65ba796b67996f6: 2024-11-23T13:22:53,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 
2024-11-23T13:22:53,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=115
2024-11-23T13:22:53,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4106): Remote procedure done, pid=115
2024-11-23T13:22:53,699 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=115, resume processing ppid=114
2024-11-23T13:22:53,699 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, ppid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4630 sec
2024-11-23T13:22:53,701 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees in 1.4680 sec
2024-11-23T13:22:54,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114
2024-11-23T13:22:54,338 INFO [Thread-1675 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 114 completed
2024-11-23T13:22:54,339 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-11-23T13:22:54,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees
2024-11-23T13:22:54,340 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-11-23T13:22:54,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116
2024-11-23T13:22:54,341 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-23T13:22:54,341 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=117, ppid=116, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-23T13:22:54,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116
2024-11-23T13:22:54,492 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317
2024-11-23T13:22:54,493 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117
2024-11-23T13:22:54,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.
2024-11-23T13:22:54,493 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2837): Flushing 2d79dbed6dcd0da8c65ba796b67996f6 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-23T13:22:54,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d79dbed6dcd0da8c65ba796b67996f6, store=A 2024-11-23T13:22:54,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:54,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d79dbed6dcd0da8c65ba796b67996f6, store=B 2024-11-23T13:22:54,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:54,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d79dbed6dcd0da8c65ba796b67996f6, store=C 2024-11-23T13:22:54,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:54,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123055abb5e8aec4e9b89c6b53481ca0549_2d79dbed6dcd0da8c65ba796b67996f6 is 50, key is test_row_0/A:col10/1732368172490/Put/seqid=0 2024-11-23T13:22:54,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742277_1453 (size=12454) 2024-11-23T13:22:54,587 DEBUG [Thread-1678 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2205f666 to 127.0.0.1:51875 2024-11-23T13:22:54,587 DEBUG [Thread-1678 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:22:54,588 DEBUG [Thread-1676 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1e247aa1 to 127.0.0.1:51875 2024-11-23T13:22:54,588 DEBUG [Thread-1676 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:22:54,589 DEBUG [Thread-1684 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x787e5169 to 127.0.0.1:51875 2024-11-23T13:22:54,589 DEBUG [Thread-1684 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:22:54,591 DEBUG [Thread-1680 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6584e9ce to 127.0.0.1:51875 2024-11-23T13:22:54,591 DEBUG [Thread-1680 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:22:54,592 DEBUG [Thread-1682 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x37ec8e3b to 127.0.0.1:51875 2024-11-23T13:22:54,592 DEBUG [Thread-1682 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:22:54,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:54,627 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. as already flushing 2024-11-23T13:22:54,627 DEBUG [Thread-1667 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x72f422b4 to 127.0.0.1:51875 2024-11-23T13:22:54,627 DEBUG [Thread-1667 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:22:54,628 DEBUG [Thread-1665 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3637e4c6 to 127.0.0.1:51875 2024-11-23T13:22:54,628 DEBUG [Thread-1665 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:22:54,632 DEBUG [Thread-1669 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2df33cdf to 127.0.0.1:51875 2024-11-23T13:22:54,632 DEBUG [Thread-1669 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:22:54,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-23T13:22:54,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:54,911 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123055abb5e8aec4e9b89c6b53481ca0549_2d79dbed6dcd0da8c65ba796b67996f6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123055abb5e8aec4e9b89c6b53481ca0549_2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:54,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/0dceb4caeff74d06a2b3522e33f0bc57, store: [table=TestAcidGuarantees family=A region=2d79dbed6dcd0da8c65ba796b67996f6] 2024-11-23T13:22:54,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/0dceb4caeff74d06a2b3522e33f0bc57 is 175, key is test_row_0/A:col10/1732368172490/Put/seqid=0 2024-11-23T13:22:54,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742278_1454 (size=31255) 2024-11-23T13:22:54,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-23T13:22:55,316 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=295, memsize=29.1 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/0dceb4caeff74d06a2b3522e33f0bc57 2024-11-23T13:22:55,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/71b8472ef3ae441286bd09ec5ec9c343 is 50, key is test_row_0/B:col10/1732368172490/Put/seqid=0 2024-11-23T13:22:55,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742279_1455 (size=12301) 2024-11-23T13:22:55,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-23T13:22:55,589 DEBUG [Thread-1673 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x167a78b0 to 127.0.0.1:51875 2024-11-23T13:22:55,589 DEBUG [Thread-1673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:22:55,640 DEBUG [Thread-1671 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x09f472e0 to 127.0.0.1:51875 2024-11-23T13:22:55,640 DEBUG [Thread-1671 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:22:55,726 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=295 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/71b8472ef3ae441286bd09ec5ec9c343 2024-11-23T13:22:55,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/ed74e7061ce4431ea702347a7e4aeeb1 is 50, key is test_row_0/C:col10/1732368172490/Put/seqid=0 2024-11-23T13:22:55,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742280_1456 (size=12301) 2024-11-23T13:22:56,135 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=295 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/ed74e7061ce4431ea702347a7e4aeeb1 2024-11-23T13:22:56,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/0dceb4caeff74d06a2b3522e33f0bc57 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/0dceb4caeff74d06a2b3522e33f0bc57 2024-11-23T13:22:56,141 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/0dceb4caeff74d06a2b3522e33f0bc57, entries=150, sequenceid=295, filesize=30.5 K 2024-11-23T13:22:56,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/71b8472ef3ae441286bd09ec5ec9c343 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/71b8472ef3ae441286bd09ec5ec9c343 2024-11-23T13:22:56,145 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/71b8472ef3ae441286bd09ec5ec9c343, entries=150, sequenceid=295, filesize=12.0 K 2024-11-23T13:22:56,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/ed74e7061ce4431ea702347a7e4aeeb1 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/ed74e7061ce4431ea702347a7e4aeeb1 2024-11-23T13:22:56,148 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/ed74e7061ce4431ea702347a7e4aeeb1, entries=150, sequenceid=295, filesize=12.0 K 2024-11-23T13:22:56,149 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=33.54 KB/34350 for 2d79dbed6dcd0da8c65ba796b67996f6 in 1656ms, sequenceid=295, compaction requested=true 2024-11-23T13:22:56,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2538): Flush status journal for 2d79dbed6dcd0da8c65ba796b67996f6: 2024-11-23T13:22:56,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 
2024-11-23T13:22:56,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=117
2024-11-23T13:22:56,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4106): Remote procedure done, pid=117
2024-11-23T13:22:56,151 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=117, resume processing ppid=116
2024-11-23T13:22:56,151 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, ppid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8090 sec
2024-11-23T13:22:56,152 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees in 1.8120 sec
2024-11-23T13:22:56,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116
2024-11-23T13:22:56,444 INFO [Thread-1675 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 116 completed
2024-11-23T13:22:56,444 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers:
2024-11-23T13:22:56,444 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 52
2024-11-23T13:22:56,444 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 55
2024-11-23T13:22:56,444 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 62
2024-11-23T13:22:56,444 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 30
2024-11-23T13:22:56,444 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 34
2024-11-23T13:22:56,444 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers:
2024-11-23T13:22:56,444 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners:
2024-11-23T13:22:56,444 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2692
2024-11-23T13:22:56,445 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8076 rows
2024-11-23T13:22:56,445 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2679
2024-11-23T13:22:56,445 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8037 rows
2024-11-23T13:22:56,445 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2674
2024-11-23T13:22:56,445 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8022 rows
2024-11-23T13:22:56,445 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2696
2024-11-23T13:22:56,445 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8088 rows
2024-11-23T13:22:56,445 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2684
2024-11-23T13:22:56,445 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8052 rows
2024-11-23T13:22:56,445 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-11-23T13:22:56,445 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1df61dc9 to 127.0.0.1:51875
2024-11-23T13:22:56,445 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-23T13:22:56,447 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees
2024-11-23T13:22:56,447 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees
2024-11-23T13:22:56,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=118, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees
2024-11-23T13:22:56,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118
2024-11-23T13:22:56,451 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732368176451"}]},"ts":"1732368176451"}
2024-11-23T13:22:56,452 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta
2024-11-23T13:22:56,454 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING
2024-11-23T13:22:56,455 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=119, ppid=118, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}]
2024-11-23T13:22:56,456 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=120, ppid=119, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=2d79dbed6dcd0da8c65ba796b67996f6, UNASSIGN}]
2024-11-23T13:22:56,456 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=120, ppid=119, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=2d79dbed6dcd0da8c65ba796b67996f6, UNASSIGN
2024-11-23T13:22:56,457 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=120 updating hbase:meta row=2d79dbed6dcd0da8c65ba796b67996f6, regionState=CLOSING, regionLocation=ba2e440802a7,33173,1732368061317
2024-11-23T13:22:56,458 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false
2024-11-23T13:22:56,458 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE; CloseRegionProcedure 2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317}]
2024-11-23T13:22:56,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118
2024-11-23T13:22:56,609 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317
2024-11-23T13:22:56,609 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] handler.UnassignRegionHandler(124): Close 2d79dbed6dcd0da8c65ba796b67996f6
2024-11-23T13:22:56,609 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false
2024-11-23T13:22:56,609 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegion(1681): Closing 2d79dbed6dcd0da8c65ba796b67996f6, disabling compactions & flushes
2024-11-23T13:22:56,609 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}]
regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:56,609 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:56,609 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. after waiting 0 ms 2024-11-23T13:22:56,609 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 2024-11-23T13:22:56,609 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegion(2837): Flushing 2d79dbed6dcd0da8c65ba796b67996f6 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-23T13:22:56,610 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d79dbed6dcd0da8c65ba796b67996f6, store=A 2024-11-23T13:22:56,610 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:56,610 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d79dbed6dcd0da8c65ba796b67996f6, store=B 2024-11-23T13:22:56,610 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:56,610 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 2d79dbed6dcd0da8c65ba796b67996f6, store=C 2024-11-23T13:22:56,610 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:22:56,614 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411239d9977c74e484922922c5972f424f451_2d79dbed6dcd0da8c65ba796b67996f6 is 50, key is test_row_0/A:col10/1732368175588/Put/seqid=0 2024-11-23T13:22:56,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742281_1457 (size=9914) 2024-11-23T13:22:56,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-23T13:22:57,018 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:57,021 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, 
pid=121}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411239d9977c74e484922922c5972f424f451_2d79dbed6dcd0da8c65ba796b67996f6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411239d9977c74e484922922c5972f424f451_2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:57,022 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/82b26059e49449039e7fdccc19ffee88, store: [table=TestAcidGuarantees family=A region=2d79dbed6dcd0da8c65ba796b67996f6] 2024-11-23T13:22:57,023 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/82b26059e49449039e7fdccc19ffee88 is 175, key is test_row_0/A:col10/1732368175588/Put/seqid=0 2024-11-23T13:22:57,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742282_1458 (size=22561) 2024-11-23T13:22:57,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-23T13:22:57,427 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=303, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/82b26059e49449039e7fdccc19ffee88 2024-11-23T13:22:57,432 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/fecc0ad6212441349f87a9b24f36596e is 50, key is test_row_0/B:col10/1732368175588/Put/seqid=0 2024-11-23T13:22:57,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742283_1459 (size=9857) 2024-11-23T13:22:57,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-23T13:22:57,836 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=303 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/fecc0ad6212441349f87a9b24f36596e 2024-11-23T13:22:57,842 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/bd9d4983b70f43b29c569466200f0927 is 50, key is test_row_0/C:col10/1732368175588/Put/seqid=0 2024-11-23T13:22:57,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742284_1460 (size=9857) 2024-11-23T13:22:58,246 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=303 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/bd9d4983b70f43b29c569466200f0927 2024-11-23T13:22:58,249 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/A/82b26059e49449039e7fdccc19ffee88 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/82b26059e49449039e7fdccc19ffee88 2024-11-23T13:22:58,252 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/82b26059e49449039e7fdccc19ffee88, entries=100, sequenceid=303, filesize=22.0 K 2024-11-23T13:22:58,253 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/B/fecc0ad6212441349f87a9b24f36596e as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/fecc0ad6212441349f87a9b24f36596e 2024-11-23T13:22:58,255 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/fecc0ad6212441349f87a9b24f36596e, entries=100, sequenceid=303, filesize=9.6 K 2024-11-23T13:22:58,256 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/.tmp/C/bd9d4983b70f43b29c569466200f0927 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/bd9d4983b70f43b29c569466200f0927 2024-11-23T13:22:58,258 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/bd9d4983b70f43b29c569466200f0927, entries=100, sequenceid=303, filesize=9.6 K 2024-11-23T13:22:58,259 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 2d79dbed6dcd0da8c65ba796b67996f6 in 1650ms, sequenceid=303, compaction requested=true 2024-11-23T13:22:58,260 DEBUG [StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/f0cca3de620446ff8a29962a05faf22e, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/4410be1b40c94d3eb8d9cfd96371a2b1, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/6ad5fc9711d5445a9a84608e88af064b, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/5a68c595ceb04b4f8f5f219a3db4e846, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/e7cdc8b9886a4027a21d37a47ec27b71, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/7d77399252234e8daa60185c03a773e5, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/3571bcc388d947dcbf9970a46284894a, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/ffa8a9fe87f04390939e2a03f1e6fc32, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/75c0769952d3426995c86b7f95f725b8, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/895527800f0145ac9f4ef1acf2dfb2b9, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/3f3c0a3b9eb745528cd7c02e013812d7, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/277b54b472ed4a7bbb84d2a08ca9cc0c, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/70ad7e3a881e4276a9d3f82fe619e71b, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/f1f46229843143ea967becb9dbe51d8b, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/1df5000a418448939df98171fb2d9dba, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/9fd4cd28ef8444f186aa009125740542, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/cb1238c28069420ba3d79f80a4221cc1, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/634bf5e20cab46019dfb079042c1fac7] to archive 2024-11-23T13:22:58,260 DEBUG [StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-23T13:22:58,262 DEBUG [StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/f0cca3de620446ff8a29962a05faf22e to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/f0cca3de620446ff8a29962a05faf22e 2024-11-23T13:22:58,262 DEBUG [StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/4410be1b40c94d3eb8d9cfd96371a2b1 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/4410be1b40c94d3eb8d9cfd96371a2b1 2024-11-23T13:22:58,263 DEBUG [StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/6ad5fc9711d5445a9a84608e88af064b to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/6ad5fc9711d5445a9a84608e88af064b 2024-11-23T13:22:58,264 DEBUG [StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/5a68c595ceb04b4f8f5f219a3db4e846 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/5a68c595ceb04b4f8f5f219a3db4e846 2024-11-23T13:22:58,265 DEBUG [StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/e7cdc8b9886a4027a21d37a47ec27b71 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/e7cdc8b9886a4027a21d37a47ec27b71 2024-11-23T13:22:58,265 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/7d77399252234e8daa60185c03a773e5 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/7d77399252234e8daa60185c03a773e5 2024-11-23T13:22:58,266 DEBUG [StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/3571bcc388d947dcbf9970a46284894a to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/3571bcc388d947dcbf9970a46284894a 2024-11-23T13:22:58,267 DEBUG [StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/ffa8a9fe87f04390939e2a03f1e6fc32 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/ffa8a9fe87f04390939e2a03f1e6fc32 2024-11-23T13:22:58,267 DEBUG [StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/75c0769952d3426995c86b7f95f725b8 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/75c0769952d3426995c86b7f95f725b8 2024-11-23T13:22:58,268 DEBUG [StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/895527800f0145ac9f4ef1acf2dfb2b9 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/895527800f0145ac9f4ef1acf2dfb2b9 2024-11-23T13:22:58,269 DEBUG [StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/3f3c0a3b9eb745528cd7c02e013812d7 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/3f3c0a3b9eb745528cd7c02e013812d7 2024-11-23T13:22:58,270 DEBUG [StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/277b54b472ed4a7bbb84d2a08ca9cc0c to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/277b54b472ed4a7bbb84d2a08ca9cc0c 2024-11-23T13:22:58,270 DEBUG [StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/70ad7e3a881e4276a9d3f82fe619e71b to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/70ad7e3a881e4276a9d3f82fe619e71b 2024-11-23T13:22:58,271 DEBUG [StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/f1f46229843143ea967becb9dbe51d8b to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/f1f46229843143ea967becb9dbe51d8b 2024-11-23T13:22:58,272 DEBUG [StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/1df5000a418448939df98171fb2d9dba to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/1df5000a418448939df98171fb2d9dba 2024-11-23T13:22:58,273 DEBUG [StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/9fd4cd28ef8444f186aa009125740542 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/9fd4cd28ef8444f186aa009125740542 2024-11-23T13:22:58,273 DEBUG [StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/cb1238c28069420ba3d79f80a4221cc1 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/cb1238c28069420ba3d79f80a4221cc1 2024-11-23T13:22:58,274 DEBUG [StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/634bf5e20cab46019dfb079042c1fac7 to 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/634bf5e20cab46019dfb079042c1fac7 2024-11-23T13:22:58,275 DEBUG [StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/545c12a9da4e43b48528a109bf514a4c, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/dadc375535324e7f888f41fabeafc4d5, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/cd1f5198a76f4c66ac5224d4561c11ae, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/8db34368b35e442c8617f14c44c29cb6, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/1b069bcb576443c5b7024953bb6d0919, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/63f1509c5b6a477bafd83ff1deff2513, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/ecc6751428b54d979bb790b6166b05b4, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/aca1994bcc474cef8cb7adbfa289ed06, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/4e7ea3abf7814676816a9366ac202dcd, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/424db21ff0a74a28ac89d322558fd3cf, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/0ec595a152824f328145fc4a88ad20dc, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/38385210c27e4071aabd10676c5b975f, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/11a216a30785493c8debc06c3ce20018, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/eb5eb683769c4c498a8942e685d0960f, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/7e1de1f2a33040928bb5898f1c960f6a, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/66c9541cf95047588d650ea7309c52b1, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/a560e47744eb434691c9561f76c9ed4d, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/c3832f5fcc234ef6bb64344650537445] to archive 2024-11-23T13:22:58,276 DEBUG [StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-23T13:22:58,276 DEBUG [StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/545c12a9da4e43b48528a109bf514a4c to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/545c12a9da4e43b48528a109bf514a4c 2024-11-23T13:22:58,277 DEBUG [StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/dadc375535324e7f888f41fabeafc4d5 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/dadc375535324e7f888f41fabeafc4d5 2024-11-23T13:22:58,278 DEBUG [StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/cd1f5198a76f4c66ac5224d4561c11ae to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/cd1f5198a76f4c66ac5224d4561c11ae 2024-11-23T13:22:58,279 DEBUG [StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/8db34368b35e442c8617f14c44c29cb6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/8db34368b35e442c8617f14c44c29cb6 2024-11-23T13:22:58,280 DEBUG [StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/1b069bcb576443c5b7024953bb6d0919 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/1b069bcb576443c5b7024953bb6d0919 2024-11-23T13:22:58,280 DEBUG [StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/63f1509c5b6a477bafd83ff1deff2513 to 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/63f1509c5b6a477bafd83ff1deff2513 2024-11-23T13:22:58,281 DEBUG [StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/ecc6751428b54d979bb790b6166b05b4 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/ecc6751428b54d979bb790b6166b05b4 2024-11-23T13:22:58,282 DEBUG [StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/aca1994bcc474cef8cb7adbfa289ed06 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/aca1994bcc474cef8cb7adbfa289ed06 2024-11-23T13:22:58,283 DEBUG [StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/4e7ea3abf7814676816a9366ac202dcd to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/4e7ea3abf7814676816a9366ac202dcd 2024-11-23T13:22:58,284 DEBUG [StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/424db21ff0a74a28ac89d322558fd3cf to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/424db21ff0a74a28ac89d322558fd3cf 2024-11-23T13:22:58,285 DEBUG [StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/0ec595a152824f328145fc4a88ad20dc to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/0ec595a152824f328145fc4a88ad20dc 2024-11-23T13:22:58,285 DEBUG [StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/38385210c27e4071aabd10676c5b975f to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/38385210c27e4071aabd10676c5b975f 2024-11-23T13:22:58,286 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/11a216a30785493c8debc06c3ce20018 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/11a216a30785493c8debc06c3ce20018 2024-11-23T13:22:58,287 DEBUG [StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/eb5eb683769c4c498a8942e685d0960f to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/eb5eb683769c4c498a8942e685d0960f 2024-11-23T13:22:58,288 DEBUG [StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/7e1de1f2a33040928bb5898f1c960f6a to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/7e1de1f2a33040928bb5898f1c960f6a 2024-11-23T13:22:58,289 DEBUG [StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/66c9541cf95047588d650ea7309c52b1 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/66c9541cf95047588d650ea7309c52b1 2024-11-23T13:22:58,289 DEBUG [StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/a560e47744eb434691c9561f76c9ed4d to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/a560e47744eb434691c9561f76c9ed4d 2024-11-23T13:22:58,290 DEBUG [StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/c3832f5fcc234ef6bb64344650537445 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/c3832f5fcc234ef6bb64344650537445 2024-11-23T13:22:58,291 DEBUG [StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/01cdf1b1d8b14eef87fc2d905754ea0f, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/5a8ad1fa81d741bb83e2c9e223a4b766, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/9e3d5f077a154eae956c27339fd3fafd, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/eb79d56b597c4cd2b0883aae2d4b2efa, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/7e4dc703f5154f5e9c2bb7986bc1cac5, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/400b139fc21c43aa84333d452e7fe474, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/b2dd7e51ed034f54aa82d5a7fbb3a728, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/8abab0b83a4149cfae824dfa33e29971, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/b2adff6bcb4946298d55f509d62eeaae, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/318e8b2bf27743c6afe593996b6ebc3b, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/2b308601dfa44236b8fccaf7894e9887, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/a12fc773b05b484d9d0692a417ac8f2f, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/8acabb0789ab43099ff49a7dc42c927a, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/a7f0d074af874ce889e8cd9b8efbeaf8, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/01bb6d2bd512483ba5a6db57acc9e661, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/38e6a7b901ea4d96865ff97c5a0b80c4, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/9d90876c9da047a3b57f32419dbad86e, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/579207da89c44e698b3d64bdb0ad3180] to archive 2024-11-23T13:22:58,292 DEBUG [StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
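[Editor's note] The HFileArchiver entries above and below show that, at region close, compacted store files are not deleted outright: each file is moved from the table's data directory to a mirrored path under the cluster's archive directory (data/default/TestAcidGuarantees/... becomes archive/data/default/TestAcidGuarantees/...). A minimal sketch of that path mapping follows, assuming the same root-directory layout as in this log; the helper name and hard-coded paths are illustrative only and are not HBase API.

    // Illustrative only: derives the archive location that mirrors a store file path,
    // matching the "Archived from ... to ..." pairs in the log above.
    import org.apache.hadoop.fs.Path;

    public final class ArchivePathSketch {
        static Path toArchivePath(Path rootDir, Path storeFile) {
            // Store file path relative to the cluster root: data/<ns>/<table>/<region>/<cf>/<hfile>
            String relative = storeFile.toUri().getPath()
                    .substring(rootDir.toUri().getPath().length() + 1);
            // Same relative layout, re-rooted under <root>/archive
            return new Path(new Path(rootDir, "archive"), relative);
        }

        public static void main(String[] args) {
            Path root = new Path("hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7");
            Path storeFile = new Path(root,
                "data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/f0cca3de620446ff8a29962a05faf22e");
            System.out.println(toArchivePath(root, storeFile));
        }
    }
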
2024-11-23T13:22:58,293 DEBUG [StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/01cdf1b1d8b14eef87fc2d905754ea0f to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/01cdf1b1d8b14eef87fc2d905754ea0f 2024-11-23T13:22:58,294 DEBUG [StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/5a8ad1fa81d741bb83e2c9e223a4b766 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/5a8ad1fa81d741bb83e2c9e223a4b766 2024-11-23T13:22:58,295 DEBUG [StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/9e3d5f077a154eae956c27339fd3fafd to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/9e3d5f077a154eae956c27339fd3fafd 2024-11-23T13:22:58,296 DEBUG [StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/eb79d56b597c4cd2b0883aae2d4b2efa to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/eb79d56b597c4cd2b0883aae2d4b2efa 2024-11-23T13:22:58,296 DEBUG [StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/7e4dc703f5154f5e9c2bb7986bc1cac5 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/7e4dc703f5154f5e9c2bb7986bc1cac5 2024-11-23T13:22:58,297 DEBUG [StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/400b139fc21c43aa84333d452e7fe474 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/400b139fc21c43aa84333d452e7fe474 2024-11-23T13:22:58,298 DEBUG [StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/b2dd7e51ed034f54aa82d5a7fbb3a728 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/b2dd7e51ed034f54aa82d5a7fbb3a728 2024-11-23T13:22:58,299 DEBUG [StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/8abab0b83a4149cfae824dfa33e29971 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/8abab0b83a4149cfae824dfa33e29971 2024-11-23T13:22:58,300 DEBUG [StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/b2adff6bcb4946298d55f509d62eeaae to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/b2adff6bcb4946298d55f509d62eeaae 2024-11-23T13:22:58,300 DEBUG [StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/318e8b2bf27743c6afe593996b6ebc3b to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/318e8b2bf27743c6afe593996b6ebc3b 2024-11-23T13:22:58,301 DEBUG [StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/2b308601dfa44236b8fccaf7894e9887 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/2b308601dfa44236b8fccaf7894e9887 2024-11-23T13:22:58,302 DEBUG [StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/a12fc773b05b484d9d0692a417ac8f2f to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/a12fc773b05b484d9d0692a417ac8f2f 2024-11-23T13:22:58,303 DEBUG [StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/8acabb0789ab43099ff49a7dc42c927a to 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/8acabb0789ab43099ff49a7dc42c927a 2024-11-23T13:22:58,303 DEBUG [StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/a7f0d074af874ce889e8cd9b8efbeaf8 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/a7f0d074af874ce889e8cd9b8efbeaf8 2024-11-23T13:22:58,304 DEBUG [StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/01bb6d2bd512483ba5a6db57acc9e661 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/01bb6d2bd512483ba5a6db57acc9e661 2024-11-23T13:22:58,305 DEBUG [StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/38e6a7b901ea4d96865ff97c5a0b80c4 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/38e6a7b901ea4d96865ff97c5a0b80c4 2024-11-23T13:22:58,306 DEBUG [StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/9d90876c9da047a3b57f32419dbad86e to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/9d90876c9da047a3b57f32419dbad86e 2024-11-23T13:22:58,306 DEBUG [StoreCloser-TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/579207da89c44e698b3d64bdb0ad3180 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/579207da89c44e698b3d64bdb0ad3180 2024-11-23T13:22:58,309 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/recovered.edits/306.seqid, newMaxSeqId=306, maxSeqId=4 2024-11-23T13:22:58,310 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6. 
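[Editor's note] With the final memstore flush written, the compacted store files archived, and the 306.seqid marker recorded under recovered.edits, the region close completes and the DisableTableProcedure (pid=118) can finish; the client then requests the table delete seen below (pid=122). A minimal sketch of the client-side teardown that would drive this disable-then-delete sequence, assuming a standard HBase client Connection (configuration and error handling omitted):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public final class TeardownSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            TableName table = TableName.valueOf("TestAcidGuarantees");
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                if (!admin.isTableDisabled(table)) {
                    // Drives the DisableTableProcedure: regions are flushed and closed, as logged above.
                    admin.disableTable(table);
                }
                // Drives the DeleteTableProcedure: region and MOB files are archived, then meta is cleaned up.
                admin.deleteTable(table);
            }
        }
    }
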
2024-11-23T13:22:58,310 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] regionserver.HRegion(1635): Region close journal for 2d79dbed6dcd0da8c65ba796b67996f6: 2024-11-23T13:22:58,311 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=121}] handler.UnassignRegionHandler(170): Closed 2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:58,311 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=120 updating hbase:meta row=2d79dbed6dcd0da8c65ba796b67996f6, regionState=CLOSED 2024-11-23T13:22:58,313 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=121, resume processing ppid=120 2024-11-23T13:22:58,313 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, ppid=120, state=SUCCESS; CloseRegionProcedure 2d79dbed6dcd0da8c65ba796b67996f6, server=ba2e440802a7,33173,1732368061317 in 1.8540 sec 2024-11-23T13:22:58,314 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=120, resume processing ppid=119 2024-11-23T13:22:58,314 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=120, ppid=119, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=2d79dbed6dcd0da8c65ba796b67996f6, UNASSIGN in 1.8570 sec 2024-11-23T13:22:58,315 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=119, resume processing ppid=118 2024-11-23T13:22:58,315 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, ppid=118, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.8600 sec 2024-11-23T13:22:58,316 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732368178316"}]},"ts":"1732368178316"} 2024-11-23T13:22:58,317 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-23T13:22:58,319 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-23T13:22:58,319 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.8710 sec 2024-11-23T13:22:58,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-23T13:22:58,554 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 118 completed 2024-11-23T13:22:58,555 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-23T13:22:58,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=122, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T13:22:58,556 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=122, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T13:22:58,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-23T13:22:58,557 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from 
filesystem for pid=122, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T13:22:58,559 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:58,560 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A, FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B, FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C, FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/recovered.edits] 2024-11-23T13:22:58,562 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/0dceb4caeff74d06a2b3522e33f0bc57 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/0dceb4caeff74d06a2b3522e33f0bc57 2024-11-23T13:22:58,563 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/319e274c01c14783b0e3f9ad976c6e82 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/319e274c01c14783b0e3f9ad976c6e82 2024-11-23T13:22:58,564 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/816fcf290262419ebc94d4f51573f5d0 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/816fcf290262419ebc94d4f51573f5d0 2024-11-23T13:22:58,565 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/82b26059e49449039e7fdccc19ffee88 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/A/82b26059e49449039e7fdccc19ffee88 2024-11-23T13:22:58,566 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/36c4e3f199d146b389f672eae6f9911b to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/36c4e3f199d146b389f672eae6f9911b 
2024-11-23T13:22:58,567 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/4d0cee8fe59d447f9e1b1a89f5684622 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/4d0cee8fe59d447f9e1b1a89f5684622 2024-11-23T13:22:58,568 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/71b8472ef3ae441286bd09ec5ec9c343 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/71b8472ef3ae441286bd09ec5ec9c343 2024-11-23T13:22:58,569 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/fecc0ad6212441349f87a9b24f36596e to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/B/fecc0ad6212441349f87a9b24f36596e 2024-11-23T13:22:58,570 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/450d300bb8644c51a2ef68a040a23742 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/450d300bb8644c51a2ef68a040a23742 2024-11-23T13:22:58,571 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/9e92e44d4f7149d0aa3dd15e608218fb to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/9e92e44d4f7149d0aa3dd15e608218fb 2024-11-23T13:22:58,572 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/bd9d4983b70f43b29c569466200f0927 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/bd9d4983b70f43b29c569466200f0927 2024-11-23T13:22:58,573 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/ed74e7061ce4431ea702347a7e4aeeb1 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/C/ed74e7061ce4431ea702347a7e4aeeb1 2024-11-23T13:22:58,575 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/recovered.edits/306.seqid to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6/recovered.edits/306.seqid 2024-11-23T13:22:58,575 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:58,575 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-23T13:22:58,576 DEBUG [PEWorker-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-23T13:22:58,576 DEBUG [PEWorker-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-23T13:22:58,578 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411230161f5ccdf754816aeb4ca2f530a9080_2d79dbed6dcd0da8c65ba796b67996f6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411230161f5ccdf754816aeb4ca2f530a9080_2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:58,579 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123055abb5e8aec4e9b89c6b53481ca0549_2d79dbed6dcd0da8c65ba796b67996f6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123055abb5e8aec4e9b89c6b53481ca0549_2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:58,580 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123314da1d0410d4e95a74984d4eaef882e_2d79dbed6dcd0da8c65ba796b67996f6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123314da1d0410d4e95a74984d4eaef882e_2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:58,581 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411233d6ff8412afd4e6a93588838eeb1b5f5_2d79dbed6dcd0da8c65ba796b67996f6 to 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411233d6ff8412afd4e6a93588838eeb1b5f5_2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:58,582 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112349d92b7a4b2448bb9691a1430270fd7c_2d79dbed6dcd0da8c65ba796b67996f6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112349d92b7a4b2448bb9691a1430270fd7c_2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:58,583 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112354bd1a666b1f49ecb6c0a2e544242b2d_2d79dbed6dcd0da8c65ba796b67996f6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112354bd1a666b1f49ecb6c0a2e544242b2d_2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:58,583 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112357fdbdb7823d43f99550ea900f3d5bb8_2d79dbed6dcd0da8c65ba796b67996f6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112357fdbdb7823d43f99550ea900f3d5bb8_2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:58,584 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411235b7127de50e245a792fcb9a14d9861b1_2d79dbed6dcd0da8c65ba796b67996f6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411235b7127de50e245a792fcb9a14d9861b1_2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:58,585 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411236b96aab086ad40688430ecab87b6969c_2d79dbed6dcd0da8c65ba796b67996f6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411236b96aab086ad40688430ecab87b6969c_2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:58,586 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112393c31432cc324b19aaa3c46cff0193ce_2d79dbed6dcd0da8c65ba796b67996f6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112393c31432cc324b19aaa3c46cff0193ce_2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:58,587 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411239d9977c74e484922922c5972f424f451_2d79dbed6dcd0da8c65ba796b67996f6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411239d9977c74e484922922c5972f424f451_2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:58,588 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123a46a0cb234234c68968a5c2059f80f86_2d79dbed6dcd0da8c65ba796b67996f6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123a46a0cb234234c68968a5c2059f80f86_2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:58,589 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123a4708b29971c4826a382b5beb8a65d3d_2d79dbed6dcd0da8c65ba796b67996f6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123a4708b29971c4826a382b5beb8a65d3d_2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:58,590 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123e0a515631d7b4288960d9d9c33c9c757_2d79dbed6dcd0da8c65ba796b67996f6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123e0a515631d7b4288960d9d9c33c9c757_2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:58,591 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123e32cf7a32d1c4084ba52feb40dd8827d_2d79dbed6dcd0da8c65ba796b67996f6 to 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123e32cf7a32d1c4084ba52feb40dd8827d_2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:58,592 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123ea30491b4a764b20ada23c93d9f3b596_2d79dbed6dcd0da8c65ba796b67996f6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123ea30491b4a764b20ada23c93d9f3b596_2d79dbed6dcd0da8c65ba796b67996f6 2024-11-23T13:22:58,592 DEBUG [PEWorker-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-23T13:22:58,594 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=122, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T13:22:58,595 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-23T13:22:58,597 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-23T13:22:58,597 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=122, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T13:22:58,597 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-23T13:22:58,598 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732368178597"}]},"ts":"9223372036854775807"} 2024-11-23T13:22:58,599 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-23T13:22:58,599 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 2d79dbed6dcd0da8c65ba796b67996f6, NAME => 'TestAcidGuarantees,,1732368151527.2d79dbed6dcd0da8c65ba796b67996f6.', STARTKEY => '', ENDKEY => ''}] 2024-11-23T13:22:58,599 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
2024-11-23T13:22:58,599 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732368178599"}]},"ts":"9223372036854775807"} 2024-11-23T13:22:58,600 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-23T13:22:58,602 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=122, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T13:22:58,603 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 47 msec 2024-11-23T13:22:58,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-23T13:22:58,657 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 122 completed 2024-11-23T13:22:58,666 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMobScanAtomicity Thread=241 (was 237) - Thread LEAK? -, OpenFileDescriptor=459 (was 447) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=309 (was 327), ProcessCount=11 (was 11), AvailableMemoryMB=3655 (was 3709) 2024-11-23T13:22:58,675 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testScanAtomicity Thread=241, OpenFileDescriptor=459, MaxFileDescriptor=1048576, SystemLoadAverage=309, ProcessCount=11, AvailableMemoryMB=3654 2024-11-23T13:22:58,676 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-11-23T13:22:58,676 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T13:22:58,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=123, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-23T13:22:58,678 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=123, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T13:22:58,678 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:58,678 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 123 2024-11-23T13:22:58,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=123 2024-11-23T13:22:58,678 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=123, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T13:22:58,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742285_1461 (size=960) 2024-11-23T13:22:58,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=123 2024-11-23T13:22:58,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=123 2024-11-23T13:22:59,085 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7 2024-11-23T13:22:59,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742286_1462 (size=53) 2024-11-23T13:22:59,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=123 2024-11-23T13:22:59,490 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T13:22:59,490 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 827437fc31cfaf801e96764bfc0e4aaa, disabling compactions & flushes 2024-11-23T13:22:59,490 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:22:59,490 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:22:59,490 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. after waiting 0 ms 2024-11-23T13:22:59,490 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:22:59,490 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 
2024-11-23T13:22:59,490 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 827437fc31cfaf801e96764bfc0e4aaa: 2024-11-23T13:22:59,491 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=123, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T13:22:59,492 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732368179491"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732368179491"}]},"ts":"1732368179491"} 2024-11-23T13:22:59,493 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-23T13:22:59,493 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=123, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T13:22:59,493 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732368179493"}]},"ts":"1732368179493"} 2024-11-23T13:22:59,494 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-23T13:22:59,497 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=124, ppid=123, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=827437fc31cfaf801e96764bfc0e4aaa, ASSIGN}] 2024-11-23T13:22:59,498 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=124, ppid=123, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=827437fc31cfaf801e96764bfc0e4aaa, ASSIGN 2024-11-23T13:22:59,499 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=124, ppid=123, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=827437fc31cfaf801e96764bfc0e4aaa, ASSIGN; state=OFFLINE, location=ba2e440802a7,33173,1732368061317; forceNewPlan=false, retain=false 2024-11-23T13:22:59,649 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=124 updating hbase:meta row=827437fc31cfaf801e96764bfc0e4aaa, regionState=OPENING, regionLocation=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:59,650 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE; OpenRegionProcedure 827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317}] 2024-11-23T13:22:59,720 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-23T13:22:59,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=123 2024-11-23T13:22:59,801 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:22:59,804 INFO [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:22:59,804 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegion(7285): Opening region: {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} 2024-11-23T13:22:59,804 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 827437fc31cfaf801e96764bfc0e4aaa 2024-11-23T13:22:59,804 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T13:22:59,804 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegion(7327): checking encryption for 827437fc31cfaf801e96764bfc0e4aaa 2024-11-23T13:22:59,804 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegion(7330): checking classloading for 827437fc31cfaf801e96764bfc0e4aaa 2024-11-23T13:22:59,806 INFO [StoreOpener-827437fc31cfaf801e96764bfc0e4aaa-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 827437fc31cfaf801e96764bfc0e4aaa 2024-11-23T13:22:59,807 INFO [StoreOpener-827437fc31cfaf801e96764bfc0e4aaa-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-23T13:22:59,807 INFO [StoreOpener-827437fc31cfaf801e96764bfc0e4aaa-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 827437fc31cfaf801e96764bfc0e4aaa columnFamilyName A 2024-11-23T13:22:59,807 DEBUG [StoreOpener-827437fc31cfaf801e96764bfc0e4aaa-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:59,807 
INFO [StoreOpener-827437fc31cfaf801e96764bfc0e4aaa-1 {}] regionserver.HStore(327): Store=827437fc31cfaf801e96764bfc0e4aaa/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T13:22:59,807 INFO [StoreOpener-827437fc31cfaf801e96764bfc0e4aaa-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 827437fc31cfaf801e96764bfc0e4aaa 2024-11-23T13:22:59,808 INFO [StoreOpener-827437fc31cfaf801e96764bfc0e4aaa-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-23T13:22:59,808 INFO [StoreOpener-827437fc31cfaf801e96764bfc0e4aaa-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 827437fc31cfaf801e96764bfc0e4aaa columnFamilyName B 2024-11-23T13:22:59,808 DEBUG [StoreOpener-827437fc31cfaf801e96764bfc0e4aaa-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:59,809 INFO [StoreOpener-827437fc31cfaf801e96764bfc0e4aaa-1 {}] regionserver.HStore(327): Store=827437fc31cfaf801e96764bfc0e4aaa/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T13:22:59,809 INFO [StoreOpener-827437fc31cfaf801e96764bfc0e4aaa-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 827437fc31cfaf801e96764bfc0e4aaa 2024-11-23T13:22:59,809 INFO [StoreOpener-827437fc31cfaf801e96764bfc0e4aaa-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-23T13:22:59,809 INFO [StoreOpener-827437fc31cfaf801e96764bfc0e4aaa-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 827437fc31cfaf801e96764bfc0e4aaa columnFamilyName C 2024-11-23T13:22:59,810 DEBUG [StoreOpener-827437fc31cfaf801e96764bfc0e4aaa-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:22:59,810 INFO [StoreOpener-827437fc31cfaf801e96764bfc0e4aaa-1 {}] regionserver.HStore(327): Store=827437fc31cfaf801e96764bfc0e4aaa/C, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T13:22:59,810 INFO [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:22:59,811 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa 2024-11-23T13:22:59,811 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa 2024-11-23T13:22:59,812 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-23T13:22:59,813 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegion(1085): writing seq id for 827437fc31cfaf801e96764bfc0e4aaa 2024-11-23T13:22:59,814 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T13:22:59,814 INFO [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegion(1102): Opened 827437fc31cfaf801e96764bfc0e4aaa; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72550090, jitterRate=0.08108058571815491}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T13:22:59,815 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegion(1001): Region open journal for 827437fc31cfaf801e96764bfc0e4aaa: 2024-11-23T13:22:59,815 INFO [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa., pid=125, masterSystemTime=1732368179801 2024-11-23T13:22:59,817 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:22:59,817 INFO [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=125}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 
2024-11-23T13:22:59,817 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=124 updating hbase:meta row=827437fc31cfaf801e96764bfc0e4aaa, regionState=OPEN, openSeqNum=2, regionLocation=ba2e440802a7,33173,1732368061317 2024-11-23T13:22:59,819 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=125, resume processing ppid=124 2024-11-23T13:22:59,819 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, ppid=124, state=SUCCESS; OpenRegionProcedure 827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 in 168 msec 2024-11-23T13:22:59,820 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=124, resume processing ppid=123 2024-11-23T13:22:59,820 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, ppid=123, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=827437fc31cfaf801e96764bfc0e4aaa, ASSIGN in 322 msec 2024-11-23T13:22:59,820 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=123, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T13:22:59,820 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732368179820"}]},"ts":"1732368179820"} 2024-11-23T13:22:59,821 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-23T13:22:59,823 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=123, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T13:22:59,824 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1470 sec 2024-11-23T13:23:00,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=123 2024-11-23T13:23:00,782 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 123 completed 2024-11-23T13:23:00,783 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4c60eb7d to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@695c2253 2024-11-23T13:23:00,787 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63cefe40, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:23:00,788 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:23:00,789 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57552, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:23:00,790 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-23T13:23:00,791 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55476, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-23T13:23:00,792 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x79b10416 to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7177efc9 2024-11-23T13:23:00,795 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@65df2359, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:23:00,795 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2f142b04 to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@61d38088 2024-11-23T13:23:00,798 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7d0ab200, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:23:00,798 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0de9f076 to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7043f683 2024-11-23T13:23:00,801 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5871c039, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:23:00,802 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4414259d to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2b0c2472 2024-11-23T13:23:00,804 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7daa5922, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:23:00,805 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7ed69825 to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@34b30c39 2024-11-23T13:23:00,807 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b7f20c4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:23:00,808 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3d672ed2 to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5f7c40ba 2024-11-23T13:23:00,810 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2070263a, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:23:00,810 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7cf40102 to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@41b0e7b6 2024-11-23T13:23:00,813 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6050584c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:23:00,813 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x496fe03f to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@f2423f3 2024-11-23T13:23:00,816 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6dd48863, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:23:00,816 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3652e74d to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@184771cf 2024-11-23T13:23:00,822 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@51196534, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:23:00,822 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2405c04e to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@76f0408 2024-11-23T13:23:00,826 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1dc5e114, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:23:00,829 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T13:23:00,829 DEBUG [hconnection-0x7f92839c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:23:00,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=126, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees 2024-11-23T13:23:00,830 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=126, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T13:23:00,830 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 
172.17.0.2:57562, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:23:00,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-23T13:23:00,830 DEBUG [hconnection-0x5413c4b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:23:00,830 DEBUG [hconnection-0x6cb11359-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:23:00,831 DEBUG [hconnection-0x8608415-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:23:00,831 DEBUG [hconnection-0x6d042fb5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:23:00,831 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=126, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T13:23:00,831 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T13:23:00,831 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57580, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:23:00,831 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57570, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:23:00,831 DEBUG [hconnection-0x10c84c72-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:23:00,833 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57596, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:23:00,833 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57582, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:23:00,833 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57606, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:23:00,834 DEBUG [hconnection-0x3dd5bd86-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:23:00,835 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57612, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:23:00,838 DEBUG [hconnection-0x1b9962ac-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:23:00,838 DEBUG [hconnection-0x1b6bf802-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:23:00,839 DEBUG [hconnection-0x6ec608b8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 
2024-11-23T13:23:00,839 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57628, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:23:00,839 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57636, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:23:00,839 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57626, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:23:00,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 827437fc31cfaf801e96764bfc0e4aaa 2024-11-23T13:23:00,842 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 827437fc31cfaf801e96764bfc0e4aaa 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-23T13:23:00,843 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=A 2024-11-23T13:23:00,843 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:00,843 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=B 2024-11-23T13:23:00,843 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:00,843 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=C 2024-11-23T13:23:00,843 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:00,858 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:00,858 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:00,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57562 deadline: 1732368240856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:00,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57628 deadline: 1732368240855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:00,862 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:00,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368240858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:00,862 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:00,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368240860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:00,862 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:00,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368240860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:00,866 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/fa5680fa82d0413081e008f0be6b7f2a is 50, key is test_row_0/A:col10/1732368180842/Put/seqid=0 2024-11-23T13:23:00,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742287_1463 (size=12001) 2024-11-23T13:23:00,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-23T13:23:00,960 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:00,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57562 deadline: 1732368240959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:00,963 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:00,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57628 deadline: 1732368240961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:00,964 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:00,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368240963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:00,964 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:00,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368240963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:00,965 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:00,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368240963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:00,983 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:00,983 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-23T13:23:00,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:00,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. as already flushing 2024-11-23T13:23:00,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:00,983 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:00,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:00,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:01,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-23T13:23:01,135 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:01,135 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-23T13:23:01,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:01,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. as already flushing 2024-11-23T13:23:01,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:01,136 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:01,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:01,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:01,163 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:01,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57562 deadline: 1732368241162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:01,168 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:01,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57628 deadline: 1732368241164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:01,169 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:01,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368241165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:01,170 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:01,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368241166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:01,170 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:01,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368241167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:01,276 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/fa5680fa82d0413081e008f0be6b7f2a 2024-11-23T13:23:01,288 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:01,288 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-23T13:23:01,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:01,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. as already flushing 2024-11-23T13:23:01,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 
2024-11-23T13:23:01,289 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:01,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:01,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:01,302 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/7ef9b4822ff546219a240387103adda6 is 50, key is test_row_0/B:col10/1732368180842/Put/seqid=0 2024-11-23T13:23:01,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742288_1464 (size=12001) 2024-11-23T13:23:01,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-23T13:23:01,441 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:01,441 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-23T13:23:01,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:01,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. as already flushing 2024-11-23T13:23:01,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 
2024-11-23T13:23:01,441 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:01,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:01,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:01,467 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:01,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57562 deadline: 1732368241464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:01,473 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:01,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57628 deadline: 1732368241470, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:01,474 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:01,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368241471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:01,475 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:01,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368241473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:01,475 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:01,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368241473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:01,593 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:01,593 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-23T13:23:01,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:01,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. as already flushing 2024-11-23T13:23:01,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:01,594 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:01,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:01,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:01,707 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/7ef9b4822ff546219a240387103adda6 2024-11-23T13:23:01,733 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/b6012cf0e5f14089931661511964c5b1 is 50, key is test_row_0/C:col10/1732368180842/Put/seqid=0 2024-11-23T13:23:01,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742289_1465 (size=12001) 2024-11-23T13:23:01,746 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:01,746 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-23T13:23:01,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:01,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 
as already flushing 2024-11-23T13:23:01,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:01,747 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:01,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:01,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:01,898 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:01,899 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-23T13:23:01,899 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:01,899 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. as already flushing 2024-11-23T13:23:01,899 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:01,899 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:01,899 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:01,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:01,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-23T13:23:01,970 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:01,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57562 deadline: 1732368241969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:01,976 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:01,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57628 deadline: 1732368241974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:01,979 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:01,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368241977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:01,980 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:01,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368241979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:01,980 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:01,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368241980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:02,051 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:02,051 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-23T13:23:02,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:02,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. as already flushing 2024-11-23T13:23:02,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:02,052 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:02,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:02,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:02,138 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/b6012cf0e5f14089931661511964c5b1 2024-11-23T13:23:02,142 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/fa5680fa82d0413081e008f0be6b7f2a as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/fa5680fa82d0413081e008f0be6b7f2a 2024-11-23T13:23:02,147 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/fa5680fa82d0413081e008f0be6b7f2a, entries=150, sequenceid=14, filesize=11.7 K 2024-11-23T13:23:02,148 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/7ef9b4822ff546219a240387103adda6 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/7ef9b4822ff546219a240387103adda6 2024-11-23T13:23:02,157 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/7ef9b4822ff546219a240387103adda6, entries=150, sequenceid=14, 
filesize=11.7 K 2024-11-23T13:23:02,158 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/b6012cf0e5f14089931661511964c5b1 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/b6012cf0e5f14089931661511964c5b1 2024-11-23T13:23:02,162 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/b6012cf0e5f14089931661511964c5b1, entries=150, sequenceid=14, filesize=11.7 K 2024-11-23T13:23:02,162 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 827437fc31cfaf801e96764bfc0e4aaa in 1320ms, sequenceid=14, compaction requested=false 2024-11-23T13:23:02,163 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 827437fc31cfaf801e96764bfc0e4aaa: 2024-11-23T13:23:02,204 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:02,204 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-11-23T13:23:02,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 
2024-11-23T13:23:02,204 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2837): Flushing 827437fc31cfaf801e96764bfc0e4aaa 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-23T13:23:02,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=A 2024-11-23T13:23:02,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:02,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=B 2024-11-23T13:23:02,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:02,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=C 2024-11-23T13:23:02,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:02,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/88f47dd7c04a4ec5b7e99df33e3a64a9 is 50, key is test_row_0/A:col10/1732368180857/Put/seqid=0 2024-11-23T13:23:02,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742290_1466 (size=12001) 2024-11-23T13:23:02,216 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/88f47dd7c04a4ec5b7e99df33e3a64a9 2024-11-23T13:23:02,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/a18330f2049846ad87eed87bbde2e5e7 is 50, key is test_row_0/B:col10/1732368180857/Put/seqid=0 2024-11-23T13:23:02,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742291_1467 (size=12001) 2024-11-23T13:23:02,640 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=37 (bloomFilter=true), 
to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/a18330f2049846ad87eed87bbde2e5e7 2024-11-23T13:23:02,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/f7bf81d67fd7442491e2067c3e2c8e37 is 50, key is test_row_0/C:col10/1732368180857/Put/seqid=0 2024-11-23T13:23:02,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742292_1468 (size=12001) 2024-11-23T13:23:02,651 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/f7bf81d67fd7442491e2067c3e2c8e37 2024-11-23T13:23:02,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/88f47dd7c04a4ec5b7e99df33e3a64a9 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/88f47dd7c04a4ec5b7e99df33e3a64a9 2024-11-23T13:23:02,657 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/88f47dd7c04a4ec5b7e99df33e3a64a9, entries=150, sequenceid=37, filesize=11.7 K 2024-11-23T13:23:02,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/a18330f2049846ad87eed87bbde2e5e7 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/a18330f2049846ad87eed87bbde2e5e7 2024-11-23T13:23:02,661 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/a18330f2049846ad87eed87bbde2e5e7, entries=150, sequenceid=37, filesize=11.7 K 2024-11-23T13:23:02,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/f7bf81d67fd7442491e2067c3e2c8e37 as 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/f7bf81d67fd7442491e2067c3e2c8e37 2024-11-23T13:23:02,665 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/f7bf81d67fd7442491e2067c3e2c8e37, entries=150, sequenceid=37, filesize=11.7 K 2024-11-23T13:23:02,666 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=0 B/0 for 827437fc31cfaf801e96764bfc0e4aaa in 461ms, sequenceid=37, compaction requested=false 2024-11-23T13:23:02,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2538): Flush status journal for 827437fc31cfaf801e96764bfc0e4aaa: 2024-11-23T13:23:02,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:02,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=127 2024-11-23T13:23:02,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4106): Remote procedure done, pid=127 2024-11-23T13:23:02,669 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=127, resume processing ppid=126 2024-11-23T13:23:02,669 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, ppid=126, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8370 sec 2024-11-23T13:23:02,672 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees in 1.8420 sec 2024-11-23T13:23:02,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-23T13:23:02,934 INFO [Thread-2064 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 126 completed 2024-11-23T13:23:02,935 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T13:23:02,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=128, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees 2024-11-23T13:23:02,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-23T13:23:02,937 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=128, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T13:23:02,938 INFO [PEWorker-1 {}] 
procedure.FlushTableProcedure(91): pid=128, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T13:23:02,938 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=129, ppid=128, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T13:23:02,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 827437fc31cfaf801e96764bfc0e4aaa 2024-11-23T13:23:02,985 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 827437fc31cfaf801e96764bfc0e4aaa 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-23T13:23:02,986 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=A 2024-11-23T13:23:02,986 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:02,986 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=B 2024-11-23T13:23:02,986 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:02,986 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=C 2024-11-23T13:23:02,986 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:02,990 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/e38974f23f9f4dd2bed987c4ea2c924b is 50, key is test_row_0/A:col10/1732368182983/Put/seqid=0 2024-11-23T13:23:02,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742293_1469 (size=19021) 2024-11-23T13:23:03,021 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:03,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368243015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:03,022 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:03,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368243015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:03,025 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:03,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57628 deadline: 1732368243020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:03,030 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:03,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368243021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:03,030 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:03,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57562 deadline: 1732368243021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:03,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-23T13:23:03,089 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:03,090 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-23T13:23:03,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:03,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. as already flushing 2024-11-23T13:23:03,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:03,090 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:03,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:03,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:03,126 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:03,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368243123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:03,126 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:03,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368243123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:03,128 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:03,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57628 deadline: 1732368243126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:03,133 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:03,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368243131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:03,133 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:03,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57562 deadline: 1732368243131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:03,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-23T13:23:03,242 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:03,242 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-23T13:23:03,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:03,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. as already flushing 2024-11-23T13:23:03,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:03,242 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:03,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:03,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:03,332 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:03,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368243327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:03,332 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:03,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368243328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:03,335 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:03,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57628 deadline: 1732368243329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:03,337 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:03,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368243334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:03,337 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:03,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57562 deadline: 1732368243334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:03,394 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:03,395 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-23T13:23:03,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:03,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. as already flushing 2024-11-23T13:23:03,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:03,395 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:03,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:03,395 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/e38974f23f9f4dd2bed987c4ea2c924b 2024-11-23T13:23:03,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:03,402 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/0c657e70726c4a97a1fe104ebbeadf26 is 50, key is test_row_0/B:col10/1732368182983/Put/seqid=0 2024-11-23T13:23:03,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742294_1470 (size=12001) 2024-11-23T13:23:03,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-23T13:23:03,547 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:03,547 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-23T13:23:03,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:03,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. as already flushing 2024-11-23T13:23:03,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 
2024-11-23T13:23:03,548 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:03,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:03,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:03,634 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:03,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368243633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:03,636 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:03,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368243633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:03,641 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:03,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57628 deadline: 1732368243637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:03,642 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:03,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368243639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:03,642 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:03,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57562 deadline: 1732368243639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:03,700 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:03,700 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-23T13:23:03,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:03,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. as already flushing 2024-11-23T13:23:03,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:03,700 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:03,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:03,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:03,806 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/0c657e70726c4a97a1fe104ebbeadf26 2024-11-23T13:23:03,812 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/825a63d42f984b6983726adc34139239 is 50, key is test_row_0/C:col10/1732368182983/Put/seqid=0 2024-11-23T13:23:03,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742295_1471 (size=12001) 2024-11-23T13:23:03,816 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/825a63d42f984b6983726adc34139239 2024-11-23T13:23:03,820 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/e38974f23f9f4dd2bed987c4ea2c924b as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/e38974f23f9f4dd2bed987c4ea2c924b 2024-11-23T13:23:03,823 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/e38974f23f9f4dd2bed987c4ea2c924b, entries=300, sequenceid=48, filesize=18.6 K 2024-11-23T13:23:03,824 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/0c657e70726c4a97a1fe104ebbeadf26 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/0c657e70726c4a97a1fe104ebbeadf26 2024-11-23T13:23:03,826 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/0c657e70726c4a97a1fe104ebbeadf26, entries=150, sequenceid=48, filesize=11.7 K 2024-11-23T13:23:03,827 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-23T13:23:03,827 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/825a63d42f984b6983726adc34139239 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/825a63d42f984b6983726adc34139239 2024-11-23T13:23:03,835 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/825a63d42f984b6983726adc34139239, entries=150, sequenceid=48, filesize=11.7 K 2024-11-23T13:23:03,836 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 827437fc31cfaf801e96764bfc0e4aaa in 851ms, sequenceid=48, compaction requested=true 2024-11-23T13:23:03,836 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 827437fc31cfaf801e96764bfc0e4aaa: 2024-11-23T13:23:03,836 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:23:03,836 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 827437fc31cfaf801e96764bfc0e4aaa:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T13:23:03,836 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:23:03,836 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:23:03,836 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 827437fc31cfaf801e96764bfc0e4aaa:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T13:23:03,836 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction 
requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:23:03,836 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 827437fc31cfaf801e96764bfc0e4aaa:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T13:23:03,836 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:23:03,837 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 43023 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:23:03,837 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): 827437fc31cfaf801e96764bfc0e4aaa/A is initiating minor compaction (all files) 2024-11-23T13:23:03,837 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:23:03,837 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 827437fc31cfaf801e96764bfc0e4aaa/A in TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:03,837 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 827437fc31cfaf801e96764bfc0e4aaa/B is initiating minor compaction (all files) 2024-11-23T13:23:03,837 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 827437fc31cfaf801e96764bfc0e4aaa/B in TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 
2024-11-23T13:23:03,837 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/fa5680fa82d0413081e008f0be6b7f2a, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/88f47dd7c04a4ec5b7e99df33e3a64a9, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/e38974f23f9f4dd2bed987c4ea2c924b] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp, totalSize=42.0 K 2024-11-23T13:23:03,837 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/7ef9b4822ff546219a240387103adda6, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/a18330f2049846ad87eed87bbde2e5e7, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/0c657e70726c4a97a1fe104ebbeadf26] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp, totalSize=35.2 K 2024-11-23T13:23:03,838 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting fa5680fa82d0413081e008f0be6b7f2a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1732368180836 2024-11-23T13:23:03,838 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 7ef9b4822ff546219a240387103adda6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1732368180836 2024-11-23T13:23:03,838 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 88f47dd7c04a4ec5b7e99df33e3a64a9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732368180854 2024-11-23T13:23:03,838 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting a18330f2049846ad87eed87bbde2e5e7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732368180854 2024-11-23T13:23:03,838 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting e38974f23f9f4dd2bed987c4ea2c924b, keycount=300, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=48, earliestPutTs=1732368182977 2024-11-23T13:23:03,838 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 0c657e70726c4a97a1fe104ebbeadf26, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=48, earliestPutTs=1732368182982 2024-11-23T13:23:03,852 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:03,853 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 
{}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-23T13:23:03,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:03,853 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2837): Flushing 827437fc31cfaf801e96764bfc0e4aaa 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-23T13:23:03,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=A 2024-11-23T13:23:03,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:03,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=B 2024-11-23T13:23:03,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:03,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=C 2024-11-23T13:23:03,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:03,858 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 827437fc31cfaf801e96764bfc0e4aaa#B#compaction#396 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:23:03,858 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 827437fc31cfaf801e96764bfc0e4aaa#A#compaction#395 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:23:03,858 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/299ff02f8c164067acf94a25137fdacd is 50, key is test_row_0/A:col10/1732368182983/Put/seqid=0 2024-11-23T13:23:03,858 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/2970864988cc4271893c7b0db8d8c2a2 is 50, key is test_row_0/B:col10/1732368182983/Put/seqid=0 2024-11-23T13:23:03,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/5350f64650b841a6b7825be7297ff335 is 50, key is test_row_0/A:col10/1732368183008/Put/seqid=0 2024-11-23T13:23:03,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742296_1472 (size=12104) 2024-11-23T13:23:03,874 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/299ff02f8c164067acf94a25137fdacd as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/299ff02f8c164067acf94a25137fdacd 2024-11-23T13:23:03,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742297_1473 (size=12104) 2024-11-23T13:23:03,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742298_1474 (size=12001) 2024-11-23T13:23:03,878 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/5350f64650b841a6b7825be7297ff335 2024-11-23T13:23:03,879 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 827437fc31cfaf801e96764bfc0e4aaa/A of 827437fc31cfaf801e96764bfc0e4aaa into 299ff02f8c164067acf94a25137fdacd(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T13:23:03,879 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 827437fc31cfaf801e96764bfc0e4aaa: 2024-11-23T13:23:03,879 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa., storeName=827437fc31cfaf801e96764bfc0e4aaa/A, priority=13, startTime=1732368183836; duration=0sec 2024-11-23T13:23:03,879 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:23:03,879 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 827437fc31cfaf801e96764bfc0e4aaa:A 2024-11-23T13:23:03,879 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:23:03,880 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:23:03,880 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): 827437fc31cfaf801e96764bfc0e4aaa/C is initiating minor compaction (all files) 2024-11-23T13:23:03,880 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 827437fc31cfaf801e96764bfc0e4aaa/C in TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:03,880 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/b6012cf0e5f14089931661511964c5b1, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/f7bf81d67fd7442491e2067c3e2c8e37, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/825a63d42f984b6983726adc34139239] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp, totalSize=35.2 K 2024-11-23T13:23:03,880 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting b6012cf0e5f14089931661511964c5b1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1732368180836 2024-11-23T13:23:03,881 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting f7bf81d67fd7442491e2067c3e2c8e37, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732368180854 2024-11-23T13:23:03,882 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 825a63d42f984b6983726adc34139239, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=48, earliestPutTs=1732368182982 2024-11-23T13:23:03,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/78ae4970317044aea983c00c9ca1af00 is 50, key is test_row_0/B:col10/1732368183008/Put/seqid=0 2024-11-23T13:23:03,894 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 827437fc31cfaf801e96764bfc0e4aaa#C#compaction#399 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:23:03,894 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/40717ad9ae954b84967aa864ddf90ec3 is 50, key is test_row_0/C:col10/1732368182983/Put/seqid=0 2024-11-23T13:23:03,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742299_1475 (size=12001) 2024-11-23T13:23:03,896 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/78ae4970317044aea983c00c9ca1af00 2024-11-23T13:23:03,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742300_1476 (size=12104) 2024-11-23T13:23:03,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/afdf06a3c44540919a77fcdc9a892460 is 50, key is test_row_0/C:col10/1732368183008/Put/seqid=0 2024-11-23T13:23:03,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742301_1477 (size=12001) 2024-11-23T13:23:04,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-23T13:23:04,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 827437fc31cfaf801e96764bfc0e4aaa 2024-11-23T13:23:04,142 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. as already flushing 2024-11-23T13:23:04,180 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:04,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368244148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:04,186 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:04,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57628 deadline: 1732368244181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:04,186 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:04,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368244181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:04,186 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:04,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368244181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:04,186 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:04,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57562 deadline: 1732368244181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:04,281 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/2970864988cc4271893c7b0db8d8c2a2 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/2970864988cc4271893c7b0db8d8c2a2 2024-11-23T13:23:04,284 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:04,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368244282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:04,285 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 827437fc31cfaf801e96764bfc0e4aaa/B of 827437fc31cfaf801e96764bfc0e4aaa into 2970864988cc4271893c7b0db8d8c2a2(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:23:04,285 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 827437fc31cfaf801e96764bfc0e4aaa: 2024-11-23T13:23:04,285 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa., storeName=827437fc31cfaf801e96764bfc0e4aaa/B, priority=13, startTime=1732368183836; duration=0sec 2024-11-23T13:23:04,285 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:23:04,285 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 827437fc31cfaf801e96764bfc0e4aaa:B 2024-11-23T13:23:04,292 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:04,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57628 deadline: 1732368244287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:04,293 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:04,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368244287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:04,293 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:04,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368244287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:04,293 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:04,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57562 deadline: 1732368244287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:04,303 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/40717ad9ae954b84967aa864ddf90ec3 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/40717ad9ae954b84967aa864ddf90ec3 2024-11-23T13:23:04,307 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 827437fc31cfaf801e96764bfc0e4aaa/C of 827437fc31cfaf801e96764bfc0e4aaa into 40717ad9ae954b84967aa864ddf90ec3(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T13:23:04,308 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 827437fc31cfaf801e96764bfc0e4aaa: 2024-11-23T13:23:04,308 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa., storeName=827437fc31cfaf801e96764bfc0e4aaa/C, priority=13, startTime=1732368183836; duration=0sec 2024-11-23T13:23:04,308 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:23:04,308 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 827437fc31cfaf801e96764bfc0e4aaa:C 2024-11-23T13:23:04,310 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/afdf06a3c44540919a77fcdc9a892460 2024-11-23T13:23:04,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/5350f64650b841a6b7825be7297ff335 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/5350f64650b841a6b7825be7297ff335 2024-11-23T13:23:04,317 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/5350f64650b841a6b7825be7297ff335, entries=150, sequenceid=74, filesize=11.7 K 2024-11-23T13:23:04,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/78ae4970317044aea983c00c9ca1af00 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/78ae4970317044aea983c00c9ca1af00 2024-11-23T13:23:04,321 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/78ae4970317044aea983c00c9ca1af00, entries=150, sequenceid=74, filesize=11.7 K 2024-11-23T13:23:04,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/afdf06a3c44540919a77fcdc9a892460 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/afdf06a3c44540919a77fcdc9a892460 2024-11-23T13:23:04,325 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/afdf06a3c44540919a77fcdc9a892460, entries=150, sequenceid=74, filesize=11.7 K 2024-11-23T13:23:04,325 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=60.38 KB/61830 for 827437fc31cfaf801e96764bfc0e4aaa in 472ms, sequenceid=74, compaction requested=false 2024-11-23T13:23:04,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2538): Flush status journal for 827437fc31cfaf801e96764bfc0e4aaa: 2024-11-23T13:23:04,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:04,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=129 2024-11-23T13:23:04,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4106): Remote procedure done, pid=129 2024-11-23T13:23:04,327 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=129, resume processing ppid=128 2024-11-23T13:23:04,328 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, ppid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3880 sec 2024-11-23T13:23:04,329 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees in 1.3930 sec 2024-11-23T13:23:04,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 827437fc31cfaf801e96764bfc0e4aaa 2024-11-23T13:23:04,488 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 827437fc31cfaf801e96764bfc0e4aaa 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-23T13:23:04,488 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=A 2024-11-23T13:23:04,488 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:04,488 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=B 2024-11-23T13:23:04,488 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:04,488 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
827437fc31cfaf801e96764bfc0e4aaa, store=C 2024-11-23T13:23:04,488 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:04,493 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/ac3a29c15a2c4e228a023fc26ee925ca is 50, key is test_row_0/A:col10/1732368184487/Put/seqid=0 2024-11-23T13:23:04,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742302_1478 (size=12001) 2024-11-23T13:23:04,519 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:04,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57628 deadline: 1732368244513, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:04,519 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:04,519 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:04,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368244514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:04,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368244515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:04,520 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:04,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57562 deadline: 1732368244515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:04,520 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:04,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368244518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:04,623 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:04,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57628 deadline: 1732368244620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:04,624 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:04,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368244620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:04,624 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:04,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57562 deadline: 1732368244621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:04,624 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:04,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368244621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:04,624 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:04,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368244621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:04,827 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:04,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368244824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:04,828 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:04,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368244825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:04,828 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:04,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57628 deadline: 1732368244825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:04,828 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:04,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57562 deadline: 1732368244826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:04,828 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:04,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368244826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:04,897 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/ac3a29c15a2c4e228a023fc26ee925ca 2024-11-23T13:23:04,903 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/d5cf35d0dc994d8f9af92fd92002271f is 50, key is test_row_0/B:col10/1732368184487/Put/seqid=0 2024-11-23T13:23:04,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742303_1479 (size=12001) 2024-11-23T13:23:04,910 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/d5cf35d0dc994d8f9af92fd92002271f 2024-11-23T13:23:04,916 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/0c69b898443e4bb2a4ae3b3f1d00c96d is 50, key is test_row_0/C:col10/1732368184487/Put/seqid=0 2024-11-23T13:23:04,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742304_1480 (size=12001) 2024-11-23T13:23:04,925 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/0c69b898443e4bb2a4ae3b3f1d00c96d 2024-11-23T13:23:04,929 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/ac3a29c15a2c4e228a023fc26ee925ca as 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/ac3a29c15a2c4e228a023fc26ee925ca 2024-11-23T13:23:04,932 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/ac3a29c15a2c4e228a023fc26ee925ca, entries=150, sequenceid=90, filesize=11.7 K 2024-11-23T13:23:04,932 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/d5cf35d0dc994d8f9af92fd92002271f as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/d5cf35d0dc994d8f9af92fd92002271f 2024-11-23T13:23:04,936 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/d5cf35d0dc994d8f9af92fd92002271f, entries=150, sequenceid=90, filesize=11.7 K 2024-11-23T13:23:04,937 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/0c69b898443e4bb2a4ae3b3f1d00c96d as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/0c69b898443e4bb2a4ae3b3f1d00c96d 2024-11-23T13:23:04,941 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/0c69b898443e4bb2a4ae3b3f1d00c96d, entries=150, sequenceid=90, filesize=11.7 K 2024-11-23T13:23:04,941 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 827437fc31cfaf801e96764bfc0e4aaa in 453ms, sequenceid=90, compaction requested=true 2024-11-23T13:23:04,941 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 827437fc31cfaf801e96764bfc0e4aaa: 2024-11-23T13:23:04,942 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 827437fc31cfaf801e96764bfc0e4aaa:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T13:23:04,942 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:23:04,942 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 827437fc31cfaf801e96764bfc0e4aaa:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T13:23:04,942 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:23:04,942 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 827437fc31cfaf801e96764bfc0e4aaa:C, 
priority=-2147483648, current under compaction store size is 3 2024-11-23T13:23:04,942 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:23:04,942 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:23:04,942 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:23:04,942 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:23:04,943 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:23:04,943 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 827437fc31cfaf801e96764bfc0e4aaa/B is initiating minor compaction (all files) 2024-11-23T13:23:04,943 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): 827437fc31cfaf801e96764bfc0e4aaa/A is initiating minor compaction (all files) 2024-11-23T13:23:04,943 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 827437fc31cfaf801e96764bfc0e4aaa/B in TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:04,943 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 827437fc31cfaf801e96764bfc0e4aaa/A in TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 
2024-11-23T13:23:04,943 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/2970864988cc4271893c7b0db8d8c2a2, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/78ae4970317044aea983c00c9ca1af00, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/d5cf35d0dc994d8f9af92fd92002271f] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp, totalSize=35.3 K 2024-11-23T13:23:04,943 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/299ff02f8c164067acf94a25137fdacd, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/5350f64650b841a6b7825be7297ff335, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/ac3a29c15a2c4e228a023fc26ee925ca] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp, totalSize=35.3 K 2024-11-23T13:23:04,943 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 2970864988cc4271893c7b0db8d8c2a2, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=48, earliestPutTs=1732368182982 2024-11-23T13:23:04,943 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 299ff02f8c164067acf94a25137fdacd, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=48, earliestPutTs=1732368182982 2024-11-23T13:23:04,943 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 78ae4970317044aea983c00c9ca1af00, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1732368183008 2024-11-23T13:23:04,944 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5350f64650b841a6b7825be7297ff335, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1732368183008 2024-11-23T13:23:04,944 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting d5cf35d0dc994d8f9af92fd92002271f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732368184150 2024-11-23T13:23:04,944 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting ac3a29c15a2c4e228a023fc26ee925ca, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732368184150 2024-11-23T13:23:04,951 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 827437fc31cfaf801e96764bfc0e4aaa#A#compaction#404 average throughput is 3.28 MB/second, slept 0 time(s) and total 
slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:23:04,952 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/b6e8149a3cb546af9780af0f8c95ab6f is 50, key is test_row_0/A:col10/1732368184487/Put/seqid=0 2024-11-23T13:23:04,952 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 827437fc31cfaf801e96764bfc0e4aaa#B#compaction#405 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:23:04,952 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/10a1bd9c7881434099789b76fea4269e is 50, key is test_row_0/B:col10/1732368184487/Put/seqid=0 2024-11-23T13:23:04,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742305_1481 (size=12207) 2024-11-23T13:23:04,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742306_1482 (size=12207) 2024-11-23T13:23:04,969 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/10a1bd9c7881434099789b76fea4269e as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/10a1bd9c7881434099789b76fea4269e 2024-11-23T13:23:04,974 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 827437fc31cfaf801e96764bfc0e4aaa/B of 827437fc31cfaf801e96764bfc0e4aaa into 10a1bd9c7881434099789b76fea4269e(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T13:23:04,974 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 827437fc31cfaf801e96764bfc0e4aaa: 2024-11-23T13:23:04,974 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa., storeName=827437fc31cfaf801e96764bfc0e4aaa/B, priority=13, startTime=1732368184942; duration=0sec 2024-11-23T13:23:04,974 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:23:04,974 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 827437fc31cfaf801e96764bfc0e4aaa:B 2024-11-23T13:23:04,975 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:23:04,975 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:23:04,975 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 827437fc31cfaf801e96764bfc0e4aaa/C is initiating minor compaction (all files) 2024-11-23T13:23:04,976 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 827437fc31cfaf801e96764bfc0e4aaa/C in TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:04,976 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/40717ad9ae954b84967aa864ddf90ec3, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/afdf06a3c44540919a77fcdc9a892460, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/0c69b898443e4bb2a4ae3b3f1d00c96d] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp, totalSize=35.3 K 2024-11-23T13:23:04,976 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 40717ad9ae954b84967aa864ddf90ec3, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=48, earliestPutTs=1732368182982 2024-11-23T13:23:04,976 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting afdf06a3c44540919a77fcdc9a892460, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1732368183008 2024-11-23T13:23:04,977 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 0c69b898443e4bb2a4ae3b3f1d00c96d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732368184150 2024-11-23T13:23:04,983 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
827437fc31cfaf801e96764bfc0e4aaa#C#compaction#406 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:23:04,983 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/cfb146faae62466fba0ce9805fc7015d is 50, key is test_row_0/C:col10/1732368184487/Put/seqid=0 2024-11-23T13:23:04,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742307_1483 (size=12207) 2024-11-23T13:23:05,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-23T13:23:05,040 INFO [Thread-2064 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 128 completed 2024-11-23T13:23:05,042 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T13:23:05,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees 2024-11-23T13:23:05,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-23T13:23:05,043 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T13:23:05,044 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T13:23:05,044 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=131, ppid=130, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T13:23:05,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 827437fc31cfaf801e96764bfc0e4aaa 2024-11-23T13:23:05,132 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 827437fc31cfaf801e96764bfc0e4aaa 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-23T13:23:05,133 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=A 2024-11-23T13:23:05,133 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:05,133 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=B 2024-11-23T13:23:05,133 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:05,133 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=C 
2024-11-23T13:23:05,133 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:05,137 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/d7c8957797c34b2380e93126d0392770 is 50, key is test_row_0/A:col10/1732368185132/Put/seqid=0 2024-11-23T13:23:05,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742308_1484 (size=16681) 2024-11-23T13:23:05,143 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:05,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368245137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:05,143 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:05,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368245138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:05,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-23T13:23:05,144 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:05,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57562 deadline: 1732368245139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:05,147 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:05,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368245142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:05,149 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:05,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57628 deadline: 1732368245143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:05,195 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:05,196 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-23T13:23:05,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:05,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. as already flushing 2024-11-23T13:23:05,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:05,196 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:23:05,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:05,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:05,247 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:05,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368245244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:05,248 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:05,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368245244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:05,248 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:05,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57562 deadline: 1732368245245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:05,250 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:05,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368245248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:05,253 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:05,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57628 deadline: 1732368245250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:05,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-23T13:23:05,348 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:05,348 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-23T13:23:05,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:05,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. as already flushing 2024-11-23T13:23:05,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:05,348 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:23:05,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:05,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:05,360 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/b6e8149a3cb546af9780af0f8c95ab6f as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/b6e8149a3cb546af9780af0f8c95ab6f 2024-11-23T13:23:05,364 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 827437fc31cfaf801e96764bfc0e4aaa/A of 827437fc31cfaf801e96764bfc0e4aaa into b6e8149a3cb546af9780af0f8c95ab6f(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:23:05,364 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 827437fc31cfaf801e96764bfc0e4aaa: 2024-11-23T13:23:05,364 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa., storeName=827437fc31cfaf801e96764bfc0e4aaa/A, priority=13, startTime=1732368184942; duration=0sec 2024-11-23T13:23:05,364 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:23:05,364 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 827437fc31cfaf801e96764bfc0e4aaa:A 2024-11-23T13:23:05,392 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/cfb146faae62466fba0ce9805fc7015d as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/cfb146faae62466fba0ce9805fc7015d 2024-11-23T13:23:05,396 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 827437fc31cfaf801e96764bfc0e4aaa/C of 827437fc31cfaf801e96764bfc0e4aaa into cfb146faae62466fba0ce9805fc7015d(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T13:23:05,396 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 827437fc31cfaf801e96764bfc0e4aaa: 2024-11-23T13:23:05,396 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa., storeName=827437fc31cfaf801e96764bfc0e4aaa/C, priority=13, startTime=1732368184942; duration=0sec 2024-11-23T13:23:05,396 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:23:05,396 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 827437fc31cfaf801e96764bfc0e4aaa:C 2024-11-23T13:23:05,451 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:05,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368245449, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:05,452 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:05,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57562 deadline: 1732368245449, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:05,452 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:05,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368245449, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:05,453 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:05,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368245452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:05,457 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:05,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57628 deadline: 1732368245454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:05,500 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:05,501 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-23T13:23:05,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:05,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. as already flushing 2024-11-23T13:23:05,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:05,501 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:23:05,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:05,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:05,544 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/d7c8957797c34b2380e93126d0392770 2024-11-23T13:23:05,551 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/81d22435ae4842a6b309fc3a689d367b is 50, key is test_row_0/B:col10/1732368185132/Put/seqid=0 2024-11-23T13:23:05,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742309_1485 (size=12001) 2024-11-23T13:23:05,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-23T13:23:05,653 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:05,654 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-23T13:23:05,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:05,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. as already flushing 2024-11-23T13:23:05,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:05,654 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:05,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:05,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:05,757 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:05,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368245754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:05,757 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:05,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368245754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:05,757 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:05,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368245755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:05,758 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:05,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57562 deadline: 1732368245755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:05,762 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:05,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57628 deadline: 1732368245759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:05,806 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:05,806 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-23T13:23:05,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:05,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. as already flushing 2024-11-23T13:23:05,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:05,806 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:23:05,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:05,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:05,955 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/81d22435ae4842a6b309fc3a689d367b 2024-11-23T13:23:05,958 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:05,959 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-23T13:23:05,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:05,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. as already flushing 2024-11-23T13:23:05,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:05,959 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:05,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:05,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:23:05,961 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/7b0d6c8fe173415f827d82785c33cf46 is 50, key is test_row_0/C:col10/1732368185132/Put/seqid=0 2024-11-23T13:23:05,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742310_1486 (size=12001) 2024-11-23T13:23:06,111 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:06,111 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-23T13:23:06,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:06,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. as already flushing 2024-11-23T13:23:06,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:06,112 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:06,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:06,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:23:06,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-23T13:23:06,261 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:06,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368246258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:06,262 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:06,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368246258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:06,262 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:06,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57562 deadline: 1732368246259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:06,262 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:06,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368246260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:06,263 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:06,264 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-23T13:23:06,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:06,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. as already flushing 2024-11-23T13:23:06,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:06,264 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:06,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:06,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:06,266 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:06,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57628 deadline: 1732368246264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:06,366 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/7b0d6c8fe173415f827d82785c33cf46 2024-11-23T13:23:06,370 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/d7c8957797c34b2380e93126d0392770 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/d7c8957797c34b2380e93126d0392770 2024-11-23T13:23:06,373 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/d7c8957797c34b2380e93126d0392770, entries=250, sequenceid=116, filesize=16.3 K 2024-11-23T13:23:06,374 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/81d22435ae4842a6b309fc3a689d367b as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/81d22435ae4842a6b309fc3a689d367b 2024-11-23T13:23:06,377 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/81d22435ae4842a6b309fc3a689d367b, entries=150, sequenceid=116, filesize=11.7 K 2024-11-23T13:23:06,378 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/7b0d6c8fe173415f827d82785c33cf46 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/7b0d6c8fe173415f827d82785c33cf46 2024-11-23T13:23:06,382 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/7b0d6c8fe173415f827d82785c33cf46, entries=150, sequenceid=116, filesize=11.7 K 2024-11-23T13:23:06,382 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 827437fc31cfaf801e96764bfc0e4aaa in 1251ms, sequenceid=116, compaction requested=false 2024-11-23T13:23:06,382 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 827437fc31cfaf801e96764bfc0e4aaa: 2024-11-23T13:23:06,416 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:06,416 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-23T13:23:06,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 
2024-11-23T13:23:06,417 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2837): Flushing 827437fc31cfaf801e96764bfc0e4aaa 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-23T13:23:06,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=A 2024-11-23T13:23:06,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:06,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=B 2024-11-23T13:23:06,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:06,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=C 2024-11-23T13:23:06,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:06,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/875b49bfa7ab46a8ae2e0b6c3b1ac59d is 50, key is test_row_0/A:col10/1732368185138/Put/seqid=0 2024-11-23T13:23:06,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742311_1487 (size=12001) 2024-11-23T13:23:06,825 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/875b49bfa7ab46a8ae2e0b6c3b1ac59d 2024-11-23T13:23:06,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/d744f5c586374b12bf7a9f78aa09e318 is 50, key is test_row_0/B:col10/1732368185138/Put/seqid=0 2024-11-23T13:23:06,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742312_1488 (size=12001) 2024-11-23T13:23:07,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-23T13:23:07,241 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=129 
(bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/d744f5c586374b12bf7a9f78aa09e318 2024-11-23T13:23:07,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/803adf32adc54b1f9941cadcb7b2b8b6 is 50, key is test_row_0/C:col10/1732368185138/Put/seqid=0 2024-11-23T13:23:07,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742313_1489 (size=12001) 2024-11-23T13:23:07,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 827437fc31cfaf801e96764bfc0e4aaa 2024-11-23T13:23:07,269 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. as already flushing 2024-11-23T13:23:07,332 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:07,332 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:07,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368247325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:07,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57562 deadline: 1732368247324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:07,333 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:07,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368247327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:07,333 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:07,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57628 deadline: 1732368247328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:07,333 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:07,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368247328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:07,437 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:07,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57562 deadline: 1732368247434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:07,438 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:07,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368247434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:07,438 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:07,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368247434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:07,438 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:07,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57628 deadline: 1732368247434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:07,438 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:07,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368247435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:07,643 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:07,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57628 deadline: 1732368247639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:07,643 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:07,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57562 deadline: 1732368247639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:07,643 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:07,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368247640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:07,643 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:07,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368247640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:07,644 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:07,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368247640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:07,651 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/803adf32adc54b1f9941cadcb7b2b8b6 2024-11-23T13:23:07,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/875b49bfa7ab46a8ae2e0b6c3b1ac59d as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/875b49bfa7ab46a8ae2e0b6c3b1ac59d 2024-11-23T13:23:07,659 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/875b49bfa7ab46a8ae2e0b6c3b1ac59d, entries=150, sequenceid=129, filesize=11.7 K 2024-11-23T13:23:07,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/d744f5c586374b12bf7a9f78aa09e318 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/d744f5c586374b12bf7a9f78aa09e318 2024-11-23T13:23:07,663 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/d744f5c586374b12bf7a9f78aa09e318, entries=150, sequenceid=129, filesize=11.7 K 2024-11-23T13:23:07,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/803adf32adc54b1f9941cadcb7b2b8b6 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/803adf32adc54b1f9941cadcb7b2b8b6 2024-11-23T13:23:07,667 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/803adf32adc54b1f9941cadcb7b2b8b6, entries=150, sequenceid=129, filesize=11.7 K 2024-11-23T13:23:07,668 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 827437fc31cfaf801e96764bfc0e4aaa in 1251ms, sequenceid=129, compaction requested=true 2024-11-23T13:23:07,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2538): Flush status journal for 827437fc31cfaf801e96764bfc0e4aaa: 2024-11-23T13:23:07,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:07,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=131 2024-11-23T13:23:07,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4106): Remote procedure done, pid=131 2024-11-23T13:23:07,670 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=131, resume processing ppid=130 2024-11-23T13:23:07,670 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, ppid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6250 sec 2024-11-23T13:23:07,671 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees in 2.6280 sec 2024-11-23T13:23:07,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 827437fc31cfaf801e96764bfc0e4aaa 2024-11-23T13:23:07,949 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 827437fc31cfaf801e96764bfc0e4aaa 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-23T13:23:07,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=A 2024-11-23T13:23:07,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:07,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=B 2024-11-23T13:23:07,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:07,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
827437fc31cfaf801e96764bfc0e4aaa, store=C 2024-11-23T13:23:07,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:07,954 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/ee5dfd809b4242af8f5add0be7f73cba is 50, key is test_row_0/A:col10/1732368187327/Put/seqid=0 2024-11-23T13:23:07,956 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:07,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368247951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:07,957 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:07,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368247952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:07,957 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:07,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368247953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:07,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742314_1490 (size=16931) 2024-11-23T13:23:07,963 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:07,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57628 deadline: 1732368247956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:07,964 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:07,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57562 deadline: 1732368247956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:08,060 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:08,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368248057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:08,062 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:08,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368248058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:08,062 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:08,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368248058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:08,068 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:08,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57628 deadline: 1732368248065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:08,068 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:08,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57562 deadline: 1732368248065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:08,264 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:08,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368248262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:08,266 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:08,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368248263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:08,267 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:08,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368248263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:08,272 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:08,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57628 deadline: 1732368248269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:08,272 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:08,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57562 deadline: 1732368248269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:08,358 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/ee5dfd809b4242af8f5add0be7f73cba 2024-11-23T13:23:08,365 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/252e82bac0a74b70a4d8b123ef4f8425 is 50, key is test_row_0/B:col10/1732368187327/Put/seqid=0 2024-11-23T13:23:08,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742315_1491 (size=12151) 2024-11-23T13:23:08,369 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/252e82bac0a74b70a4d8b123ef4f8425 2024-11-23T13:23:08,375 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/f55fc349dfc14c61ac672f36300b4c5e is 50, key is test_row_0/C:col10/1732368187327/Put/seqid=0 2024-11-23T13:23:08,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742316_1492 (size=12151) 2024-11-23T13:23:08,380 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/f55fc349dfc14c61ac672f36300b4c5e 2024-11-23T13:23:08,383 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/ee5dfd809b4242af8f5add0be7f73cba as 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/ee5dfd809b4242af8f5add0be7f73cba 2024-11-23T13:23:08,386 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/ee5dfd809b4242af8f5add0be7f73cba, entries=250, sequenceid=156, filesize=16.5 K 2024-11-23T13:23:08,388 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/252e82bac0a74b70a4d8b123ef4f8425 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/252e82bac0a74b70a4d8b123ef4f8425 2024-11-23T13:23:08,391 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/252e82bac0a74b70a4d8b123ef4f8425, entries=150, sequenceid=156, filesize=11.9 K 2024-11-23T13:23:08,392 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/f55fc349dfc14c61ac672f36300b4c5e as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/f55fc349dfc14c61ac672f36300b4c5e 2024-11-23T13:23:08,396 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/f55fc349dfc14c61ac672f36300b4c5e, entries=150, sequenceid=156, filesize=11.9 K 2024-11-23T13:23:08,396 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 827437fc31cfaf801e96764bfc0e4aaa in 448ms, sequenceid=156, compaction requested=true 2024-11-23T13:23:08,396 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 827437fc31cfaf801e96764bfc0e4aaa: 2024-11-23T13:23:08,397 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 827437fc31cfaf801e96764bfc0e4aaa:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T13:23:08,397 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:23:08,397 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T13:23:08,397 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 827437fc31cfaf801e96764bfc0e4aaa:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T13:23:08,397 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:23:08,397 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 827437fc31cfaf801e96764bfc0e4aaa:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T13:23:08,397 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T13:23:08,397 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:23:08,398 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48360 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T13:23:08,398 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 57820 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T13:23:08,398 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 827437fc31cfaf801e96764bfc0e4aaa/B is initiating minor compaction (all files) 2024-11-23T13:23:08,398 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): 827437fc31cfaf801e96764bfc0e4aaa/A is initiating minor compaction (all files) 2024-11-23T13:23:08,398 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 827437fc31cfaf801e96764bfc0e4aaa/B in TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:08,398 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 827437fc31cfaf801e96764bfc0e4aaa/A in TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 
2024-11-23T13:23:08,398 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/10a1bd9c7881434099789b76fea4269e, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/81d22435ae4842a6b309fc3a689d367b, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/d744f5c586374b12bf7a9f78aa09e318, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/252e82bac0a74b70a4d8b123ef4f8425] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp, totalSize=47.2 K 2024-11-23T13:23:08,398 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/b6e8149a3cb546af9780af0f8c95ab6f, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/d7c8957797c34b2380e93126d0392770, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/875b49bfa7ab46a8ae2e0b6c3b1ac59d, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/ee5dfd809b4242af8f5add0be7f73cba] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp, totalSize=56.5 K 2024-11-23T13:23:08,398 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting b6e8149a3cb546af9780af0f8c95ab6f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732368184150 2024-11-23T13:23:08,398 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 10a1bd9c7881434099789b76fea4269e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732368184150 2024-11-23T13:23:08,399 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting d7c8957797c34b2380e93126d0392770, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1732368184513 2024-11-23T13:23:08,399 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 81d22435ae4842a6b309fc3a689d367b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1732368184514 2024-11-23T13:23:08,399 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 875b49bfa7ab46a8ae2e0b6c3b1ac59d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1732368185138 2024-11-23T13:23:08,399 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 
d744f5c586374b12bf7a9f78aa09e318, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1732368185138 2024-11-23T13:23:08,399 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 252e82bac0a74b70a4d8b123ef4f8425, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1732368187320 2024-11-23T13:23:08,399 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting ee5dfd809b4242af8f5add0be7f73cba, keycount=250, bloomtype=ROW, size=16.5 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1732368187320 2024-11-23T13:23:08,407 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 827437fc31cfaf801e96764bfc0e4aaa#A#compaction#416 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:23:08,407 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 827437fc31cfaf801e96764bfc0e4aaa#B#compaction#417 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:23:08,407 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/f74439901a454d198f2e9da78ea26daa is 50, key is test_row_0/A:col10/1732368187327/Put/seqid=0 2024-11-23T13:23:08,407 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/0354a71d76554b958542c8d144c22bb0 is 50, key is test_row_0/B:col10/1732368187327/Put/seqid=0 2024-11-23T13:23:08,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742317_1493 (size=12493) 2024-11-23T13:23:08,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742318_1494 (size=12493) 2024-11-23T13:23:08,427 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/f74439901a454d198f2e9da78ea26daa as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/f74439901a454d198f2e9da78ea26daa 2024-11-23T13:23:08,431 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/0354a71d76554b958542c8d144c22bb0 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/0354a71d76554b958542c8d144c22bb0 2024-11-23T13:23:08,432 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed 
compaction of 4 (all) file(s) in 827437fc31cfaf801e96764bfc0e4aaa/A of 827437fc31cfaf801e96764bfc0e4aaa into f74439901a454d198f2e9da78ea26daa(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:23:08,433 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 827437fc31cfaf801e96764bfc0e4aaa: 2024-11-23T13:23:08,433 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa., storeName=827437fc31cfaf801e96764bfc0e4aaa/A, priority=12, startTime=1732368188397; duration=0sec 2024-11-23T13:23:08,433 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:23:08,433 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 827437fc31cfaf801e96764bfc0e4aaa:A 2024-11-23T13:23:08,433 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T13:23:08,434 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48360 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T13:23:08,434 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): 827437fc31cfaf801e96764bfc0e4aaa/C is initiating minor compaction (all files) 2024-11-23T13:23:08,434 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 827437fc31cfaf801e96764bfc0e4aaa/C in TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 
2024-11-23T13:23:08,434 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/cfb146faae62466fba0ce9805fc7015d, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/7b0d6c8fe173415f827d82785c33cf46, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/803adf32adc54b1f9941cadcb7b2b8b6, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/f55fc349dfc14c61ac672f36300b4c5e] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp, totalSize=47.2 K 2024-11-23T13:23:08,434 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting cfb146faae62466fba0ce9805fc7015d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732368184150 2024-11-23T13:23:08,435 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7b0d6c8fe173415f827d82785c33cf46, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1732368184514 2024-11-23T13:23:08,435 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 803adf32adc54b1f9941cadcb7b2b8b6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1732368185138 2024-11-23T13:23:08,435 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting f55fc349dfc14c61ac672f36300b4c5e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1732368187320 2024-11-23T13:23:08,436 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 827437fc31cfaf801e96764bfc0e4aaa/B of 827437fc31cfaf801e96764bfc0e4aaa into 0354a71d76554b958542c8d144c22bb0(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T13:23:08,436 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 827437fc31cfaf801e96764bfc0e4aaa: 2024-11-23T13:23:08,436 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa., storeName=827437fc31cfaf801e96764bfc0e4aaa/B, priority=12, startTime=1732368188397; duration=0sec 2024-11-23T13:23:08,436 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:23:08,436 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 827437fc31cfaf801e96764bfc0e4aaa:B 2024-11-23T13:23:08,443 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 827437fc31cfaf801e96764bfc0e4aaa#C#compaction#418 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:23:08,444 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/705d74db8a274a0c9e6190a0ee1282de is 50, key is test_row_0/C:col10/1732368187327/Put/seqid=0 2024-11-23T13:23:08,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742319_1495 (size=12493) 2024-11-23T13:23:08,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 827437fc31cfaf801e96764bfc0e4aaa 2024-11-23T13:23:08,570 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 827437fc31cfaf801e96764bfc0e4aaa 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-23T13:23:08,571 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=A 2024-11-23T13:23:08,571 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:08,571 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=B 2024-11-23T13:23:08,571 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:08,571 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=C 2024-11-23T13:23:08,571 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:08,575 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/b39e361ab7cc4a7a91a8dc6a77200525 is 50, key is test_row_0/A:col10/1732368188568/Put/seqid=0 2024-11-23T13:23:08,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742320_1496 (size=14541) 
2024-11-23T13:23:08,581 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/b39e361ab7cc4a7a91a8dc6a77200525 2024-11-23T13:23:08,589 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/b71a03a800db42dd9a2efe32315a52b8 is 50, key is test_row_0/B:col10/1732368188568/Put/seqid=0 2024-11-23T13:23:08,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742321_1497 (size=12151) 2024-11-23T13:23:08,603 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:08,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57562 deadline: 1732368248597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:08,604 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:08,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57628 deadline: 1732368248598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:08,607 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:08,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368248601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:08,608 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:08,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368248603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:08,608 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:08,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368248603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:08,707 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:08,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57562 deadline: 1732368248704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:08,707 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:08,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57628 deadline: 1732368248705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:08,712 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:08,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368248708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:08,712 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:08,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368248709, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:08,712 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:08,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368248709, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:08,858 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/705d74db8a274a0c9e6190a0ee1282de as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/705d74db8a274a0c9e6190a0ee1282de 2024-11-23T13:23:08,862 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 827437fc31cfaf801e96764bfc0e4aaa/C of 827437fc31cfaf801e96764bfc0e4aaa into 705d74db8a274a0c9e6190a0ee1282de(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:23:08,862 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 827437fc31cfaf801e96764bfc0e4aaa: 2024-11-23T13:23:08,862 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa., storeName=827437fc31cfaf801e96764bfc0e4aaa/C, priority=12, startTime=1732368188397; duration=0sec 2024-11-23T13:23:08,862 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:23:08,862 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 827437fc31cfaf801e96764bfc0e4aaa:C 2024-11-23T13:23:08,910 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:08,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57562 deadline: 1732368248908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:08,914 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:08,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57628 deadline: 1732368248909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:08,916 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:08,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368248913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:08,916 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:08,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368248913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:08,917 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:08,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368248914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:08,993 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/b71a03a800db42dd9a2efe32315a52b8 2024-11-23T13:23:09,000 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/3e71ef376ec64a3e90bac6675fbff557 is 50, key is test_row_0/C:col10/1732368188568/Put/seqid=0 2024-11-23T13:23:09,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742322_1498 (size=12151) 2024-11-23T13:23:09,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-23T13:23:09,148 INFO [Thread-2064 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 130 completed 2024-11-23T13:23:09,149 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T13:23:09,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees 2024-11-23T13:23:09,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 
2024-11-23T13:23:09,150 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T13:23:09,151 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T13:23:09,151 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T13:23:09,214 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:09,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57562 deadline: 1732368249212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:09,221 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:09,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57628 deadline: 1732368249216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:09,221 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:09,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368249217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:09,222 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:09,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368249217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:09,222 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:09,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368249218, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:09,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-23T13:23:09,302 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:09,303 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-23T13:23:09,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:09,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. as already flushing 2024-11-23T13:23:09,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:09,303 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:23:09,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:09,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:09,405 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/3e71ef376ec64a3e90bac6675fbff557 2024-11-23T13:23:09,409 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/b39e361ab7cc4a7a91a8dc6a77200525 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/b39e361ab7cc4a7a91a8dc6a77200525 2024-11-23T13:23:09,412 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/b39e361ab7cc4a7a91a8dc6a77200525, entries=200, sequenceid=169, filesize=14.2 K 2024-11-23T13:23:09,413 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/b71a03a800db42dd9a2efe32315a52b8 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/b71a03a800db42dd9a2efe32315a52b8 2024-11-23T13:23:09,416 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/b71a03a800db42dd9a2efe32315a52b8, entries=150, sequenceid=169, filesize=11.9 K 2024-11-23T13:23:09,417 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/3e71ef376ec64a3e90bac6675fbff557 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/3e71ef376ec64a3e90bac6675fbff557 2024-11-23T13:23:09,420 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/3e71ef376ec64a3e90bac6675fbff557, entries=150, sequenceid=169, filesize=11.9 K 2024-11-23T13:23:09,421 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 827437fc31cfaf801e96764bfc0e4aaa in 851ms, sequenceid=169, compaction requested=false 2024-11-23T13:23:09,421 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 827437fc31cfaf801e96764bfc0e4aaa: 2024-11-23T13:23:09,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=132 2024-11-23T13:23:09,456 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:09,456 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-23T13:23:09,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:09,456 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2837): Flushing 827437fc31cfaf801e96764bfc0e4aaa 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-23T13:23:09,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=A 2024-11-23T13:23:09,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:09,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=B 2024-11-23T13:23:09,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:09,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=C 2024-11-23T13:23:09,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:09,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/c72646285de347939c3b9c2dea93147b is 50, key is test_row_0/A:col10/1732368188602/Put/seqid=0 2024-11-23T13:23:09,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742323_1499 (size=12151) 2024-11-23T13:23:09,717 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. as already flushing 2024-11-23T13:23:09,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 827437fc31cfaf801e96764bfc0e4aaa 2024-11-23T13:23:09,733 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:09,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368249727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:09,733 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:09,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368249727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:09,733 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:09,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368249728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:09,737 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:09,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57628 deadline: 1732368249732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:09,737 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:09,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57562 deadline: 1732368249733, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:09,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-23T13:23:09,835 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:09,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368249834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:09,836 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:09,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368249834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:09,842 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:09,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57628 deadline: 1732368249838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:09,842 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:09,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57562 deadline: 1732368249838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:09,868 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/c72646285de347939c3b9c2dea93147b 2024-11-23T13:23:09,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/c88ef7df1dd54579ae8d957c7632dcf1 is 50, key is test_row_0/B:col10/1732368188602/Put/seqid=0 2024-11-23T13:23:09,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742324_1500 (size=12151) 2024-11-23T13:23:10,039 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:10,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368250037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:10,040 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:10,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368250037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:10,045 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:10,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57628 deadline: 1732368250043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:10,047 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:10,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57562 deadline: 1732368250044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:10,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-23T13:23:10,280 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/c88ef7df1dd54579ae8d957c7632dcf1 2024-11-23T13:23:10,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/a78b5b6a444e4638b1cc7a453e91e480 is 50, key is test_row_0/C:col10/1732368188602/Put/seqid=0 2024-11-23T13:23:10,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742325_1501 (size=12151) 2024-11-23T13:23:10,343 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:10,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368250341, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:10,344 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:10,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368250343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:10,350 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:10,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57628 deadline: 1732368250347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:10,351 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:10,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57562 deadline: 1732368250348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:10,692 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/a78b5b6a444e4638b1cc7a453e91e480 2024-11-23T13:23:10,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/c72646285de347939c3b9c2dea93147b as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/c72646285de347939c3b9c2dea93147b 2024-11-23T13:23:10,700 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/c72646285de347939c3b9c2dea93147b, entries=150, sequenceid=196, filesize=11.9 K 2024-11-23T13:23:10,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/c88ef7df1dd54579ae8d957c7632dcf1 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/c88ef7df1dd54579ae8d957c7632dcf1 2024-11-23T13:23:10,703 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/c88ef7df1dd54579ae8d957c7632dcf1, entries=150, sequenceid=196, filesize=11.9 K 2024-11-23T13:23:10,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/a78b5b6a444e4638b1cc7a453e91e480 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/a78b5b6a444e4638b1cc7a453e91e480 2024-11-23T13:23:10,707 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/a78b5b6a444e4638b1cc7a453e91e480, entries=150, sequenceid=196, filesize=11.9 K 2024-11-23T13:23:10,708 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 827437fc31cfaf801e96764bfc0e4aaa in 1252ms, sequenceid=196, compaction requested=true 2024-11-23T13:23:10,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2538): Flush status journal for 827437fc31cfaf801e96764bfc0e4aaa: 2024-11-23T13:23:10,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:10,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=133 2024-11-23T13:23:10,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4106): Remote procedure done, pid=133 2024-11-23T13:23:10,710 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=133, resume processing ppid=132 2024-11-23T13:23:10,710 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, ppid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5580 sec 2024-11-23T13:23:10,711 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees in 1.5610 sec 2024-11-23T13:23:10,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 827437fc31cfaf801e96764bfc0e4aaa 2024-11-23T13:23:10,756 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 827437fc31cfaf801e96764bfc0e4aaa 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-23T13:23:10,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=A 2024-11-23T13:23:10,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:10,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=B 2024-11-23T13:23:10,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:10,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
827437fc31cfaf801e96764bfc0e4aaa, store=C 2024-11-23T13:23:10,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:10,760 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/06f0d17a44a24c17ad36a8a9b50c3e00 is 50, key is test_row_0/A:col10/1732368189732/Put/seqid=0 2024-11-23T13:23:10,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742326_1502 (size=14541) 2024-11-23T13:23:10,763 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/06f0d17a44a24c17ad36a8a9b50c3e00 2024-11-23T13:23:10,769 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/4906941c2ced43e78bb02048ae1b0716 is 50, key is test_row_0/B:col10/1732368189732/Put/seqid=0 2024-11-23T13:23:10,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742327_1503 (size=12151) 2024-11-23T13:23:10,855 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:10,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368250849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:10,860 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:10,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368250851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:10,861 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:10,861 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:10,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57628 deadline: 1732368250852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:10,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368250852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:10,861 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:10,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57562 deadline: 1732368250853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:10,958 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:10,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368250956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:10,964 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:10,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368250961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:10,964 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:10,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368250962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:11,162 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:11,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368251161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:11,168 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:11,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368251165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:11,168 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:11,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368251166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:11,173 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/4906941c2ced43e78bb02048ae1b0716 2024-11-23T13:23:11,180 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/984aa338b6c24476b7e1b9c456060b2f is 50, key is test_row_0/C:col10/1732368189732/Put/seqid=0 2024-11-23T13:23:11,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742328_1504 (size=12151) 2024-11-23T13:23:11,184 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/984aa338b6c24476b7e1b9c456060b2f 2024-11-23T13:23:11,188 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/06f0d17a44a24c17ad36a8a9b50c3e00 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/06f0d17a44a24c17ad36a8a9b50c3e00 2024-11-23T13:23:11,192 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/06f0d17a44a24c17ad36a8a9b50c3e00, entries=200, sequenceid=207, filesize=14.2 K 2024-11-23T13:23:11,192 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/4906941c2ced43e78bb02048ae1b0716 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/4906941c2ced43e78bb02048ae1b0716 2024-11-23T13:23:11,195 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/4906941c2ced43e78bb02048ae1b0716, entries=150, sequenceid=207, filesize=11.9 K 2024-11-23T13:23:11,196 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/984aa338b6c24476b7e1b9c456060b2f as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/984aa338b6c24476b7e1b9c456060b2f 2024-11-23T13:23:11,200 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/984aa338b6c24476b7e1b9c456060b2f, entries=150, sequenceid=207, filesize=11.9 K 2024-11-23T13:23:11,200 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 827437fc31cfaf801e96764bfc0e4aaa in 445ms, sequenceid=207, compaction requested=true 2024-11-23T13:23:11,200 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 827437fc31cfaf801e96764bfc0e4aaa: 2024-11-23T13:23:11,200 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 827437fc31cfaf801e96764bfc0e4aaa:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T13:23:11,201 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:23:11,201 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T13:23:11,201 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 827437fc31cfaf801e96764bfc0e4aaa:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T13:23:11,201 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:23:11,201 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T13:23:11,201 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 827437fc31cfaf801e96764bfc0e4aaa:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T13:23:11,201 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:23:11,202 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 53726 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T13:23:11,202 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] 
compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48946 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T13:23:11,202 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 827437fc31cfaf801e96764bfc0e4aaa/B is initiating minor compaction (all files) 2024-11-23T13:23:11,202 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): 827437fc31cfaf801e96764bfc0e4aaa/A is initiating minor compaction (all files) 2024-11-23T13:23:11,202 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 827437fc31cfaf801e96764bfc0e4aaa/B in TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:11,202 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 827437fc31cfaf801e96764bfc0e4aaa/A in TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:11,202 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/0354a71d76554b958542c8d144c22bb0, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/b71a03a800db42dd9a2efe32315a52b8, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/c88ef7df1dd54579ae8d957c7632dcf1, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/4906941c2ced43e78bb02048ae1b0716] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp, totalSize=47.8 K 2024-11-23T13:23:11,202 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/f74439901a454d198f2e9da78ea26daa, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/b39e361ab7cc4a7a91a8dc6a77200525, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/c72646285de347939c3b9c2dea93147b, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/06f0d17a44a24c17ad36a8a9b50c3e00] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp, totalSize=52.5 K 2024-11-23T13:23:11,202 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting f74439901a454d198f2e9da78ea26daa, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1732368187320 2024-11-23T13:23:11,202 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 0354a71d76554b958542c8d144c22bb0, keycount=150, bloomtype=ROW, 
size=12.2 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1732368187320 2024-11-23T13:23:11,203 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting b39e361ab7cc4a7a91a8dc6a77200525, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732368187951 2024-11-23T13:23:11,203 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting b71a03a800db42dd9a2efe32315a52b8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732368187951 2024-11-23T13:23:11,203 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting c88ef7df1dd54579ae8d957c7632dcf1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1732368188600 2024-11-23T13:23:11,203 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting c72646285de347939c3b9c2dea93147b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1732368188600 2024-11-23T13:23:11,203 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 06f0d17a44a24c17ad36a8a9b50c3e00, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1732368189727 2024-11-23T13:23:11,203 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 4906941c2ced43e78bb02048ae1b0716, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1732368189727 2024-11-23T13:23:11,210 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 827437fc31cfaf801e96764bfc0e4aaa#A#compaction#428 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:23:11,211 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/150c3e85a2e94fefab18bfae8aa758e0 is 50, key is test_row_0/A:col10/1732368189732/Put/seqid=0 2024-11-23T13:23:11,211 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 827437fc31cfaf801e96764bfc0e4aaa#B#compaction#429 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:23:11,212 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/460e11b7db6941fea2e7433449f6fe66 is 50, key is test_row_0/B:col10/1732368189732/Put/seqid=0 2024-11-23T13:23:11,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742329_1505 (size=12629) 2024-11-23T13:23:11,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742330_1506 (size=12629) 2024-11-23T13:23:11,220 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/150c3e85a2e94fefab18bfae8aa758e0 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/150c3e85a2e94fefab18bfae8aa758e0 2024-11-23T13:23:11,226 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 827437fc31cfaf801e96764bfc0e4aaa/A of 827437fc31cfaf801e96764bfc0e4aaa into 150c3e85a2e94fefab18bfae8aa758e0(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:23:11,226 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 827437fc31cfaf801e96764bfc0e4aaa: 2024-11-23T13:23:11,226 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa., storeName=827437fc31cfaf801e96764bfc0e4aaa/A, priority=12, startTime=1732368191200; duration=0sec 2024-11-23T13:23:11,226 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:23:11,226 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 827437fc31cfaf801e96764bfc0e4aaa:A 2024-11-23T13:23:11,226 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T13:23:11,227 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48946 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T13:23:11,227 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): 827437fc31cfaf801e96764bfc0e4aaa/C is initiating minor compaction (all files) 2024-11-23T13:23:11,227 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 827437fc31cfaf801e96764bfc0e4aaa/C in TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 
2024-11-23T13:23:11,227 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/705d74db8a274a0c9e6190a0ee1282de, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/3e71ef376ec64a3e90bac6675fbff557, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/a78b5b6a444e4638b1cc7a453e91e480, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/984aa338b6c24476b7e1b9c456060b2f] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp, totalSize=47.8 K 2024-11-23T13:23:11,227 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 705d74db8a274a0c9e6190a0ee1282de, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1732368187320 2024-11-23T13:23:11,228 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3e71ef376ec64a3e90bac6675fbff557, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732368187951 2024-11-23T13:23:11,229 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting a78b5b6a444e4638b1cc7a453e91e480, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1732368188600 2024-11-23T13:23:11,229 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 984aa338b6c24476b7e1b9c456060b2f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1732368189727 2024-11-23T13:23:11,237 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 827437fc31cfaf801e96764bfc0e4aaa#C#compaction#430 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:23:11,237 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/5b8c39f707da49ff93e6ee116f8d44fc is 50, key is test_row_0/C:col10/1732368189732/Put/seqid=0 2024-11-23T13:23:11,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742331_1507 (size=12629) 2024-11-23T13:23:11,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-23T13:23:11,257 INFO [Thread-2064 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 132 completed 2024-11-23T13:23:11,258 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T13:23:11,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees 2024-11-23T13:23:11,260 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T13:23:11,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-23T13:23:11,260 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T13:23:11,260 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=135, ppid=134, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T13:23:11,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-23T13:23:11,412 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:11,412 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-23T13:23:11,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 
2024-11-23T13:23:11,412 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2837): Flushing 827437fc31cfaf801e96764bfc0e4aaa 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-23T13:23:11,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=A 2024-11-23T13:23:11,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:11,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=B 2024-11-23T13:23:11,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:11,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=C 2024-11-23T13:23:11,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:11,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/58ddeefcf7294d2bbaf50ca3d4fe7880 is 50, key is test_row_0/A:col10/1732368190851/Put/seqid=0 2024-11-23T13:23:11,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742332_1508 (size=12151) 2024-11-23T13:23:11,422 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/58ddeefcf7294d2bbaf50ca3d4fe7880 2024-11-23T13:23:11,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/119637f9585c40148fcfade6509e2f47 is 50, key is test_row_0/B:col10/1732368190851/Put/seqid=0 2024-11-23T13:23:11,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742333_1509 (size=12151) 2024-11-23T13:23:11,432 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=233 (bloomFilter=true), 
to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/119637f9585c40148fcfade6509e2f47 2024-11-23T13:23:11,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/b8cfb9d04eaa44f9b503ba11ef202aa9 is 50, key is test_row_0/C:col10/1732368190851/Put/seqid=0 2024-11-23T13:23:11,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742334_1510 (size=12151) 2024-11-23T13:23:11,442 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/b8cfb9d04eaa44f9b503ba11ef202aa9 2024-11-23T13:23:11,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/58ddeefcf7294d2bbaf50ca3d4fe7880 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/58ddeefcf7294d2bbaf50ca3d4fe7880 2024-11-23T13:23:11,452 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/58ddeefcf7294d2bbaf50ca3d4fe7880, entries=150, sequenceid=233, filesize=11.9 K 2024-11-23T13:23:11,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/119637f9585c40148fcfade6509e2f47 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/119637f9585c40148fcfade6509e2f47 2024-11-23T13:23:11,456 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/119637f9585c40148fcfade6509e2f47, entries=150, sequenceid=233, filesize=11.9 K 2024-11-23T13:23:11,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/b8cfb9d04eaa44f9b503ba11ef202aa9 as 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/b8cfb9d04eaa44f9b503ba11ef202aa9 2024-11-23T13:23:11,459 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/b8cfb9d04eaa44f9b503ba11ef202aa9, entries=150, sequenceid=233, filesize=11.9 K 2024-11-23T13:23:11,460 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=0 B/0 for 827437fc31cfaf801e96764bfc0e4aaa in 48ms, sequenceid=233, compaction requested=false 2024-11-23T13:23:11,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2538): Flush status journal for 827437fc31cfaf801e96764bfc0e4aaa: 2024-11-23T13:23:11,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:11,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=135 2024-11-23T13:23:11,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4106): Remote procedure done, pid=135 2024-11-23T13:23:11,463 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=135, resume processing ppid=134 2024-11-23T13:23:11,463 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, ppid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 201 msec 2024-11-23T13:23:11,464 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees in 205 msec 2024-11-23T13:23:11,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 827437fc31cfaf801e96764bfc0e4aaa 2024-11-23T13:23:11,481 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 827437fc31cfaf801e96764bfc0e4aaa 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-23T13:23:11,482 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=A 2024-11-23T13:23:11,482 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:11,482 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=B 2024-11-23T13:23:11,482 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:11,482 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=C 2024-11-23T13:23:11,482 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:11,486 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/112d0215c24d4518ab0fd7745e6e180d is 50, key is test_row_0/A:col10/1732368191481/Put/seqid=0 2024-11-23T13:23:11,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742335_1511 (size=19321) 2024-11-23T13:23:11,527 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:11,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368251525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:11,530 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:11,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368251526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:11,532 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:11,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368251527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:11,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-23T13:23:11,562 INFO [Thread-2064 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 134 completed 2024-11-23T13:23:11,563 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T13:23:11,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees 2024-11-23T13:23:11,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-23T13:23:11,564 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T13:23:11,564 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T13:23:11,564 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T13:23:11,621 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/460e11b7db6941fea2e7433449f6fe66 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/460e11b7db6941fea2e7433449f6fe66 2024-11-23T13:23:11,625 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 827437fc31cfaf801e96764bfc0e4aaa/B of 827437fc31cfaf801e96764bfc0e4aaa into 460e11b7db6941fea2e7433449f6fe66(size=12.3 K), total size for store is 24.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T13:23:11,625 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 827437fc31cfaf801e96764bfc0e4aaa: 2024-11-23T13:23:11,625 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa., storeName=827437fc31cfaf801e96764bfc0e4aaa/B, priority=12, startTime=1732368191201; duration=0sec 2024-11-23T13:23:11,625 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:23:11,625 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 827437fc31cfaf801e96764bfc0e4aaa:B 2024-11-23T13:23:11,633 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:11,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368251628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:11,634 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:11,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368251631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:11,634 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:11,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368251633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:11,646 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/5b8c39f707da49ff93e6ee116f8d44fc as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/5b8c39f707da49ff93e6ee116f8d44fc 2024-11-23T13:23:11,650 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 827437fc31cfaf801e96764bfc0e4aaa/C of 827437fc31cfaf801e96764bfc0e4aaa into 5b8c39f707da49ff93e6ee116f8d44fc(size=12.3 K), total size for store is 24.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:23:11,650 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 827437fc31cfaf801e96764bfc0e4aaa: 2024-11-23T13:23:11,650 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa., storeName=827437fc31cfaf801e96764bfc0e4aaa/C, priority=12, startTime=1732368191201; duration=0sec 2024-11-23T13:23:11,650 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:23:11,650 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 827437fc31cfaf801e96764bfc0e4aaa:C 2024-11-23T13:23:11,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-23T13:23:11,716 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:11,716 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-23T13:23:11,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 
2024-11-23T13:23:11,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. as already flushing 2024-11-23T13:23:11,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:11,717 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:11,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:23:11,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:11,837 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:11,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368251835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:11,838 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:11,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368251836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:11,838 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:11,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368251836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:11,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-23T13:23:11,866 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:11,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57562 deadline: 1732368251864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:11,874 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:11,874 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:11,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57628 deadline: 1732368251871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:11,875 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-23T13:23:11,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 
2024-11-23T13:23:11,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. as already flushing 2024-11-23T13:23:11,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:11,875 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:11,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:23:11,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:23:11,896 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=245 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/112d0215c24d4518ab0fd7745e6e180d 2024-11-23T13:23:11,901 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/9bdf994858f947ff91e878321337e176 is 50, key is test_row_0/B:col10/1732368191481/Put/seqid=0 2024-11-23T13:23:11,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742336_1512 (size=12151) 2024-11-23T13:23:12,027 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:12,027 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-23T13:23:12,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:12,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. as already flushing 2024-11-23T13:23:12,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:12,028 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:23:12,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:12,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:12,139 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:12,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368252138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:12,140 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:12,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368252139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:12,143 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:12,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368252140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:12,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-23T13:23:12,180 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:12,180 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-23T13:23:12,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:12,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. as already flushing 2024-11-23T13:23:12,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:12,180 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:23:12,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:12,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:12,306 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=245 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/9bdf994858f947ff91e878321337e176 2024-11-23T13:23:12,312 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/0f0c03bf1b1b47fda6c7435b061e464f is 50, key is test_row_0/C:col10/1732368191481/Put/seqid=0 2024-11-23T13:23:12,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742337_1513 (size=12151) 2024-11-23T13:23:12,319 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=245 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/0f0c03bf1b1b47fda6c7435b061e464f 2024-11-23T13:23:12,323 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/112d0215c24d4518ab0fd7745e6e180d as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/112d0215c24d4518ab0fd7745e6e180d 2024-11-23T13:23:12,326 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/112d0215c24d4518ab0fd7745e6e180d, entries=300, sequenceid=245, filesize=18.9 K 2024-11-23T13:23:12,327 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/9bdf994858f947ff91e878321337e176 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/9bdf994858f947ff91e878321337e176 2024-11-23T13:23:12,332 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:12,333 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-23T13:23:12,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 
2024-11-23T13:23:12,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. as already flushing 2024-11-23T13:23:12,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:12,333 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:12,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:23:12,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:23:12,342 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/9bdf994858f947ff91e878321337e176, entries=150, sequenceid=245, filesize=11.9 K 2024-11-23T13:23:12,344 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/0f0c03bf1b1b47fda6c7435b061e464f as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/0f0c03bf1b1b47fda6c7435b061e464f 2024-11-23T13:23:12,347 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/0f0c03bf1b1b47fda6c7435b061e464f, entries=150, sequenceid=245, filesize=11.9 K 2024-11-23T13:23:12,348 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 827437fc31cfaf801e96764bfc0e4aaa in 867ms, sequenceid=245, compaction requested=true 2024-11-23T13:23:12,348 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 827437fc31cfaf801e96764bfc0e4aaa: 2024-11-23T13:23:12,348 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:23:12,348 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 827437fc31cfaf801e96764bfc0e4aaa:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T13:23:12,348 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:23:12,348 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 827437fc31cfaf801e96764bfc0e4aaa:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T13:23:12,349 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:23:12,349 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 827437fc31cfaf801e96764bfc0e4aaa:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T13:23:12,349 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:23:12,349 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:23:12,349 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 44101 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:23:12,349 DEBUG 
[RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): 827437fc31cfaf801e96764bfc0e4aaa/A is initiating minor compaction (all files) 2024-11-23T13:23:12,349 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 827437fc31cfaf801e96764bfc0e4aaa/A in TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:12,349 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/150c3e85a2e94fefab18bfae8aa758e0, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/58ddeefcf7294d2bbaf50ca3d4fe7880, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/112d0215c24d4518ab0fd7745e6e180d] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp, totalSize=43.1 K 2024-11-23T13:23:12,350 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:23:12,350 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 827437fc31cfaf801e96764bfc0e4aaa/B is initiating minor compaction (all files) 2024-11-23T13:23:12,350 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 150c3e85a2e94fefab18bfae8aa758e0, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1732368189727 2024-11-23T13:23:12,350 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 827437fc31cfaf801e96764bfc0e4aaa/B in TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 
2024-11-23T13:23:12,350 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/460e11b7db6941fea2e7433449f6fe66, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/119637f9585c40148fcfade6509e2f47, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/9bdf994858f947ff91e878321337e176] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp, totalSize=36.1 K 2024-11-23T13:23:12,350 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 58ddeefcf7294d2bbaf50ca3d4fe7880, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1732368190848 2024-11-23T13:23:12,350 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 460e11b7db6941fea2e7433449f6fe66, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1732368189727 2024-11-23T13:23:12,351 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 119637f9585c40148fcfade6509e2f47, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1732368190848 2024-11-23T13:23:12,351 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 112d0215c24d4518ab0fd7745e6e180d, keycount=300, bloomtype=ROW, size=18.9 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1732368191470 2024-11-23T13:23:12,351 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 9bdf994858f947ff91e878321337e176, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1732368191470 2024-11-23T13:23:12,359 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 827437fc31cfaf801e96764bfc0e4aaa#B#compaction#438 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:23:12,359 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 827437fc31cfaf801e96764bfc0e4aaa#A#compaction#437 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:23:12,360 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/0180628cf4b24e849cabf575bb691943 is 50, key is test_row_0/B:col10/1732368191481/Put/seqid=0 2024-11-23T13:23:12,360 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/ff3e616141e9452a9cc26617909cc473 is 50, key is test_row_0/A:col10/1732368191481/Put/seqid=0 2024-11-23T13:23:12,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742338_1514 (size=12731) 2024-11-23T13:23:12,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742339_1515 (size=12731) 2024-11-23T13:23:12,386 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/ff3e616141e9452a9cc26617909cc473 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/ff3e616141e9452a9cc26617909cc473 2024-11-23T13:23:12,394 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 827437fc31cfaf801e96764bfc0e4aaa/A of 827437fc31cfaf801e96764bfc0e4aaa into ff3e616141e9452a9cc26617909cc473(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T13:23:12,394 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 827437fc31cfaf801e96764bfc0e4aaa: 2024-11-23T13:23:12,394 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa., storeName=827437fc31cfaf801e96764bfc0e4aaa/A, priority=13, startTime=1732368192348; duration=0sec 2024-11-23T13:23:12,394 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:23:12,394 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 827437fc31cfaf801e96764bfc0e4aaa:A 2024-11-23T13:23:12,394 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:23:12,395 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:23:12,395 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): 827437fc31cfaf801e96764bfc0e4aaa/C is initiating minor compaction (all files) 2024-11-23T13:23:12,395 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 827437fc31cfaf801e96764bfc0e4aaa/C in TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:12,395 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/5b8c39f707da49ff93e6ee116f8d44fc, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/b8cfb9d04eaa44f9b503ba11ef202aa9, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/0f0c03bf1b1b47fda6c7435b061e464f] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp, totalSize=36.1 K 2024-11-23T13:23:12,396 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5b8c39f707da49ff93e6ee116f8d44fc, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1732368189727 2024-11-23T13:23:12,396 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting b8cfb9d04eaa44f9b503ba11ef202aa9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1732368190848 2024-11-23T13:23:12,396 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0f0c03bf1b1b47fda6c7435b061e464f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1732368191470 2024-11-23T13:23:12,404 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 827437fc31cfaf801e96764bfc0e4aaa#C#compaction#439 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:23:12,404 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/f3c819a9e8dd45df8e578e04984ff990 is 50, key is test_row_0/C:col10/1732368191481/Put/seqid=0 2024-11-23T13:23:12,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742340_1516 (size=12731) 2024-11-23T13:23:12,422 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/f3c819a9e8dd45df8e578e04984ff990 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/f3c819a9e8dd45df8e578e04984ff990 2024-11-23T13:23:12,430 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 827437fc31cfaf801e96764bfc0e4aaa/C of 827437fc31cfaf801e96764bfc0e4aaa into f3c819a9e8dd45df8e578e04984ff990(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:23:12,430 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 827437fc31cfaf801e96764bfc0e4aaa: 2024-11-23T13:23:12,430 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa., storeName=827437fc31cfaf801e96764bfc0e4aaa/C, priority=13, startTime=1732368192349; duration=0sec 2024-11-23T13:23:12,430 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:23:12,430 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 827437fc31cfaf801e96764bfc0e4aaa:C 2024-11-23T13:23:12,484 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:12,484 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-23T13:23:12,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 
2024-11-23T13:23:12,485 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2837): Flushing 827437fc31cfaf801e96764bfc0e4aaa 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-23T13:23:12,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=A 2024-11-23T13:23:12,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:12,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=B 2024-11-23T13:23:12,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:12,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=C 2024-11-23T13:23:12,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:12,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/f94f0304a35a4974ab58b6b2d9d73772 is 50, key is test_row_0/A:col10/1732368191519/Put/seqid=0 2024-11-23T13:23:12,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742341_1517 (size=12301) 2024-11-23T13:23:12,645 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. as already flushing 2024-11-23T13:23:12,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 827437fc31cfaf801e96764bfc0e4aaa 2024-11-23T13:23:12,658 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:12,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368252653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:12,660 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:12,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368252654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:12,661 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:12,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368252655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:12,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-23T13:23:12,761 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:12,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368252759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:12,764 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:12,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368252761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:12,765 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:12,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368252762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:12,777 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/0180628cf4b24e849cabf575bb691943 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/0180628cf4b24e849cabf575bb691943 2024-11-23T13:23:12,781 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 827437fc31cfaf801e96764bfc0e4aaa/B of 827437fc31cfaf801e96764bfc0e4aaa into 0180628cf4b24e849cabf575bb691943(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T13:23:12,781 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 827437fc31cfaf801e96764bfc0e4aaa: 2024-11-23T13:23:12,781 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa., storeName=827437fc31cfaf801e96764bfc0e4aaa/B, priority=13, startTime=1732368192348; duration=0sec 2024-11-23T13:23:12,781 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:23:12,781 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 827437fc31cfaf801e96764bfc0e4aaa:B 2024-11-23T13:23:12,894 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=273 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/f94f0304a35a4974ab58b6b2d9d73772 2024-11-23T13:23:12,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/fa7f2f31af88498fb4f456c2bac639d3 is 50, key is test_row_0/B:col10/1732368191519/Put/seqid=0 2024-11-23T13:23:12,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742342_1518 (size=12301) 2024-11-23T13:23:12,968 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:12,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368252965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:12,968 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:12,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368252966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:12,969 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:12,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368252967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:13,271 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:13,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368253269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:13,271 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:13,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368253270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:13,272 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:13,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368253271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:13,305 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=273 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/fa7f2f31af88498fb4f456c2bac639d3 2024-11-23T13:23:13,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/090a15293df74a0ba746310de88def57 is 50, key is test_row_0/C:col10/1732368191519/Put/seqid=0 2024-11-23T13:23:13,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742343_1519 (size=12301) 2024-11-23T13:23:13,320 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=273 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/090a15293df74a0ba746310de88def57 2024-11-23T13:23:13,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/f94f0304a35a4974ab58b6b2d9d73772 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/f94f0304a35a4974ab58b6b2d9d73772 2024-11-23T13:23:13,328 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/f94f0304a35a4974ab58b6b2d9d73772, entries=150, sequenceid=273, filesize=12.0 K 2024-11-23T13:23:13,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/fa7f2f31af88498fb4f456c2bac639d3 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/fa7f2f31af88498fb4f456c2bac639d3 2024-11-23T13:23:13,332 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/fa7f2f31af88498fb4f456c2bac639d3, entries=150, sequenceid=273, filesize=12.0 K 2024-11-23T13:23:13,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/090a15293df74a0ba746310de88def57 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/090a15293df74a0ba746310de88def57 2024-11-23T13:23:13,337 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/090a15293df74a0ba746310de88def57, entries=150, sequenceid=273, filesize=12.0 K 2024-11-23T13:23:13,337 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 827437fc31cfaf801e96764bfc0e4aaa in 853ms, sequenceid=273, compaction requested=false 2024-11-23T13:23:13,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2538): Flush status journal for 827437fc31cfaf801e96764bfc0e4aaa: 2024-11-23T13:23:13,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 
2024-11-23T13:23:13,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=137 2024-11-23T13:23:13,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4106): Remote procedure done, pid=137 2024-11-23T13:23:13,341 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=137, resume processing ppid=136 2024-11-23T13:23:13,341 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, ppid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7750 sec 2024-11-23T13:23:13,342 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees in 1.7790 sec 2024-11-23T13:23:13,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-23T13:23:13,668 INFO [Thread-2064 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 136 completed 2024-11-23T13:23:13,669 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T13:23:13,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees 2024-11-23T13:23:13,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-23T13:23:13,670 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T13:23:13,671 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T13:23:13,671 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=139, ppid=138, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T13:23:13,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-23T13:23:13,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 827437fc31cfaf801e96764bfc0e4aaa 2024-11-23T13:23:13,778 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 827437fc31cfaf801e96764bfc0e4aaa 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-23T13:23:13,779 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=A 2024-11-23T13:23:13,779 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:13,779 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
827437fc31cfaf801e96764bfc0e4aaa, store=B 2024-11-23T13:23:13,779 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:13,779 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=C 2024-11-23T13:23:13,779 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:13,782 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/56e7c20b856945b2aa2f99dd6ea76db4 is 50, key is test_row_0/A:col10/1732368193777/Put/seqid=0 2024-11-23T13:23:13,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742344_1520 (size=17181) 2024-11-23T13:23:13,815 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:13,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368253809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:13,815 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:13,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368253813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:13,817 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:13,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368253815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:13,822 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:13,823 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-23T13:23:13,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:13,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. as already flushing 2024-11-23T13:23:13,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:13,823 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:23:13,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:13,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:13,884 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:13,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57562 deadline: 1732368253879, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:13,885 DEBUG [Thread-2058 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4152 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa., hostname=ba2e440802a7,33173,1732368061317, seqNum=2, see https://s.apache.org/timeout, 
exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at 
org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T13:23:13,892 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:13,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57628 deadline: 1732368253889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:13,893 DEBUG [Thread-2060 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4161 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa., hostname=ba2e440802a7,33173,1732368061317, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T13:23:13,919 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:13,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368253916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:13,920 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:13,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368253916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:13,921 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:13,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368253918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:13,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-23T13:23:13,975 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:13,975 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-23T13:23:13,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:13,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. as already flushing 2024-11-23T13:23:13,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:13,975 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:23:13,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:13,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:14,126 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:14,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368254122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:14,126 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:14,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368254122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:14,128 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:14,128 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:14,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368254122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:14,128 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-23T13:23:14,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 
2024-11-23T13:23:14,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. as already flushing 2024-11-23T13:23:14,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:14,128 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:14,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:23:14,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:23:14,187 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/56e7c20b856945b2aa2f99dd6ea76db4 2024-11-23T13:23:14,193 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/118332302296443287b3e360851b2370 is 50, key is test_row_0/B:col10/1732368193777/Put/seqid=0 2024-11-23T13:23:14,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742345_1521 (size=12301) 2024-11-23T13:23:14,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-23T13:23:14,280 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:14,280 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-23T13:23:14,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:14,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. as already flushing 2024-11-23T13:23:14,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:14,281 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:23:14,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:14,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:14,432 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:14,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368254427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:14,432 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:14,432 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:14,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368254428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:14,433 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-23T13:23:14,433 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:14,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368254430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:14,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 
2024-11-23T13:23:14,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. as already flushing 2024-11-23T13:23:14,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:14,433 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:14,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:23:14,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:14,585 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:14,585 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-23T13:23:14,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:14,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 
as already flushing 2024-11-23T13:23:14,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:14,586 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:14,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:14,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:14,596 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/118332302296443287b3e360851b2370 2024-11-23T13:23:14,602 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/92dde3d1dd7b4ccab97ce733f030301e is 50, key is test_row_0/C:col10/1732368193777/Put/seqid=0 2024-11-23T13:23:14,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742346_1522 (size=12301) 2024-11-23T13:23:14,606 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/92dde3d1dd7b4ccab97ce733f030301e 2024-11-23T13:23:14,609 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/56e7c20b856945b2aa2f99dd6ea76db4 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/56e7c20b856945b2aa2f99dd6ea76db4 2024-11-23T13:23:14,612 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/56e7c20b856945b2aa2f99dd6ea76db4, entries=250, sequenceid=288, filesize=16.8 K 2024-11-23T13:23:14,613 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/118332302296443287b3e360851b2370 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/118332302296443287b3e360851b2370 2024-11-23T13:23:14,617 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/118332302296443287b3e360851b2370, entries=150, sequenceid=288, filesize=12.0 K 2024-11-23T13:23:14,618 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/92dde3d1dd7b4ccab97ce733f030301e as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/92dde3d1dd7b4ccab97ce733f030301e 2024-11-23T13:23:14,621 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/92dde3d1dd7b4ccab97ce733f030301e, entries=150, sequenceid=288, filesize=12.0 K 2024-11-23T13:23:14,622 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 827437fc31cfaf801e96764bfc0e4aaa in 844ms, sequenceid=288, compaction requested=true 2024-11-23T13:23:14,622 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 827437fc31cfaf801e96764bfc0e4aaa: 2024-11-23T13:23:14,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 827437fc31cfaf801e96764bfc0e4aaa:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T13:23:14,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:23:14,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 827437fc31cfaf801e96764bfc0e4aaa:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T13:23:14,622 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:23:14,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:23:14,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 827437fc31cfaf801e96764bfc0e4aaa:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T13:23:14,622 DEBUG 
[RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:23:14,622 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:23:14,624 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42213 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:23:14,624 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): 827437fc31cfaf801e96764bfc0e4aaa/A is initiating minor compaction (all files) 2024-11-23T13:23:14,624 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 827437fc31cfaf801e96764bfc0e4aaa/A in TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:14,624 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/ff3e616141e9452a9cc26617909cc473, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/f94f0304a35a4974ab58b6b2d9d73772, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/56e7c20b856945b2aa2f99dd6ea76db4] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp, totalSize=41.2 K 2024-11-23T13:23:14,624 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:23:14,624 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 827437fc31cfaf801e96764bfc0e4aaa/B is initiating minor compaction (all files) 2024-11-23T13:23:14,624 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting ff3e616141e9452a9cc26617909cc473, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1732368191470 2024-11-23T13:23:14,624 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 827437fc31cfaf801e96764bfc0e4aaa/B in TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 
2024-11-23T13:23:14,624 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/0180628cf4b24e849cabf575bb691943, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/fa7f2f31af88498fb4f456c2bac639d3, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/118332302296443287b3e360851b2370] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp, totalSize=36.5 K 2024-11-23T13:23:14,625 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting f94f0304a35a4974ab58b6b2d9d73772, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=273, earliestPutTs=1732368191519 2024-11-23T13:23:14,625 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 0180628cf4b24e849cabf575bb691943, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1732368191470 2024-11-23T13:23:14,625 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 56e7c20b856945b2aa2f99dd6ea76db4, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732368192651 2024-11-23T13:23:14,625 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting fa7f2f31af88498fb4f456c2bac639d3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=273, earliestPutTs=1732368191519 2024-11-23T13:23:14,625 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 118332302296443287b3e360851b2370, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732368192651 2024-11-23T13:23:14,632 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 827437fc31cfaf801e96764bfc0e4aaa#A#compaction#446 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:23:14,633 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/1e64a02bdbbd4f27ad28248c58cd739a is 50, key is test_row_0/A:col10/1732368193777/Put/seqid=0 2024-11-23T13:23:14,634 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 827437fc31cfaf801e96764bfc0e4aaa#B#compaction#447 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:23:14,634 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/9ce1709ad32c481d8b38336c792602de is 50, key is test_row_0/B:col10/1732368193777/Put/seqid=0 2024-11-23T13:23:14,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742348_1524 (size=12983) 2024-11-23T13:23:14,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742347_1523 (size=12983) 2024-11-23T13:23:14,652 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/1e64a02bdbbd4f27ad28248c58cd739a as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/1e64a02bdbbd4f27ad28248c58cd739a 2024-11-23T13:23:14,653 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/9ce1709ad32c481d8b38336c792602de as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/9ce1709ad32c481d8b38336c792602de 2024-11-23T13:23:14,659 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 827437fc31cfaf801e96764bfc0e4aaa/A of 827437fc31cfaf801e96764bfc0e4aaa into 1e64a02bdbbd4f27ad28248c58cd739a(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T13:23:14,659 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 827437fc31cfaf801e96764bfc0e4aaa: 2024-11-23T13:23:14,659 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa., storeName=827437fc31cfaf801e96764bfc0e4aaa/A, priority=13, startTime=1732368194622; duration=0sec 2024-11-23T13:23:14,659 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:23:14,659 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 827437fc31cfaf801e96764bfc0e4aaa:A 2024-11-23T13:23:14,659 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:23:14,660 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:23:14,660 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): 827437fc31cfaf801e96764bfc0e4aaa/C is initiating minor compaction (all files) 2024-11-23T13:23:14,660 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 827437fc31cfaf801e96764bfc0e4aaa/C in TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:14,661 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/f3c819a9e8dd45df8e578e04984ff990, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/090a15293df74a0ba746310de88def57, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/92dde3d1dd7b4ccab97ce733f030301e] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp, totalSize=36.5 K 2024-11-23T13:23:14,661 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 827437fc31cfaf801e96764bfc0e4aaa/B of 827437fc31cfaf801e96764bfc0e4aaa into 9ce1709ad32c481d8b38336c792602de(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T13:23:14,661 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 827437fc31cfaf801e96764bfc0e4aaa: 2024-11-23T13:23:14,661 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa., storeName=827437fc31cfaf801e96764bfc0e4aaa/B, priority=13, startTime=1732368194622; duration=0sec 2024-11-23T13:23:14,661 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting f3c819a9e8dd45df8e578e04984ff990, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1732368191470 2024-11-23T13:23:14,661 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:23:14,661 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 827437fc31cfaf801e96764bfc0e4aaa:B 2024-11-23T13:23:14,662 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 090a15293df74a0ba746310de88def57, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=273, earliestPutTs=1732368191519 2024-11-23T13:23:14,662 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 92dde3d1dd7b4ccab97ce733f030301e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732368192651 2024-11-23T13:23:14,668 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 827437fc31cfaf801e96764bfc0e4aaa#C#compaction#448 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:23:14,669 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/49286b299f6b4704bb5951a02c94934c is 50, key is test_row_0/C:col10/1732368193777/Put/seqid=0 2024-11-23T13:23:14,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742349_1525 (size=12983) 2024-11-23T13:23:14,738 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:14,738 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-23T13:23:14,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 
2024-11-23T13:23:14,738 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2837): Flushing 827437fc31cfaf801e96764bfc0e4aaa 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-23T13:23:14,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=A 2024-11-23T13:23:14,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:14,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=B 2024-11-23T13:23:14,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:14,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=C 2024-11-23T13:23:14,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:14,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/7b0f05ae2e194b4d90c499765429a71f is 50, key is test_row_0/A:col10/1732368193807/Put/seqid=0 2024-11-23T13:23:14,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742350_1526 (size=12301) 2024-11-23T13:23:14,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-23T13:23:14,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 827437fc31cfaf801e96764bfc0e4aaa 2024-11-23T13:23:14,936 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. as already flushing 2024-11-23T13:23:14,952 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:14,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368254947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:14,956 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:14,956 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:14,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368254952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:14,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368254952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:15,057 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:15,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368255053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:15,058 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:15,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368255057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:15,058 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:15,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368255057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:15,079 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/49286b299f6b4704bb5951a02c94934c as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/49286b299f6b4704bb5951a02c94934c 2024-11-23T13:23:15,083 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 827437fc31cfaf801e96764bfc0e4aaa/C of 827437fc31cfaf801e96764bfc0e4aaa into 49286b299f6b4704bb5951a02c94934c(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T13:23:15,083 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 827437fc31cfaf801e96764bfc0e4aaa: 2024-11-23T13:23:15,083 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa., storeName=827437fc31cfaf801e96764bfc0e4aaa/C, priority=13, startTime=1732368194622; duration=0sec 2024-11-23T13:23:15,083 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:23:15,083 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 827437fc31cfaf801e96764bfc0e4aaa:C 2024-11-23T13:23:15,147 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/7b0f05ae2e194b4d90c499765429a71f 2024-11-23T13:23:15,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/9488d869813c473ebbb64e77cdc9d1d6 is 50, key is test_row_0/B:col10/1732368193807/Put/seqid=0 2024-11-23T13:23:15,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742351_1527 (size=12301) 2024-11-23T13:23:15,261 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:15,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368255258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:15,261 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:15,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368255259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:15,261 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:15,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368255259, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:15,564 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:15,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368255562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:15,564 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/9488d869813c473ebbb64e77cdc9d1d6 2024-11-23T13:23:15,566 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:15,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368255563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:15,567 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:15,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368255564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:15,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/8c51fe8532844a63a1fcba42ee745590 is 50, key is test_row_0/C:col10/1732368193807/Put/seqid=0 2024-11-23T13:23:15,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742352_1528 (size=12301) 2024-11-23T13:23:15,578 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/8c51fe8532844a63a1fcba42ee745590 2024-11-23T13:23:15,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/7b0f05ae2e194b4d90c499765429a71f as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/7b0f05ae2e194b4d90c499765429a71f 2024-11-23T13:23:15,585 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/7b0f05ae2e194b4d90c499765429a71f, entries=150, sequenceid=312, filesize=12.0 K 2024-11-23T13:23:15,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/9488d869813c473ebbb64e77cdc9d1d6 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/9488d869813c473ebbb64e77cdc9d1d6 2024-11-23T13:23:15,589 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/9488d869813c473ebbb64e77cdc9d1d6, entries=150, sequenceid=312, filesize=12.0 K 2024-11-23T13:23:15,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/8c51fe8532844a63a1fcba42ee745590 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/8c51fe8532844a63a1fcba42ee745590 2024-11-23T13:23:15,592 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/8c51fe8532844a63a1fcba42ee745590, entries=150, sequenceid=312, filesize=12.0 K 2024-11-23T13:23:15,593 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 827437fc31cfaf801e96764bfc0e4aaa in 855ms, sequenceid=312, compaction requested=false 2024-11-23T13:23:15,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2538): Flush status journal for 827437fc31cfaf801e96764bfc0e4aaa: 2024-11-23T13:23:15,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 
2024-11-23T13:23:15,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=139 2024-11-23T13:23:15,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4106): Remote procedure done, pid=139 2024-11-23T13:23:15,596 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=139, resume processing ppid=138 2024-11-23T13:23:15,596 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, ppid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9230 sec 2024-11-23T13:23:15,597 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees in 1.9270 sec 2024-11-23T13:23:15,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-23T13:23:15,774 INFO [Thread-2064 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 138 completed 2024-11-23T13:23:15,775 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T13:23:15,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees 2024-11-23T13:23:15,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-23T13:23:15,776 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T13:23:15,777 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T13:23:15,777 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T13:23:15,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-23T13:23:15,928 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:15,929 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-23T13:23:15,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 
2024-11-23T13:23:15,929 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2837): Flushing 827437fc31cfaf801e96764bfc0e4aaa 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-23T13:23:15,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=A 2024-11-23T13:23:15,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:15,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=B 2024-11-23T13:23:15,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:15,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=C 2024-11-23T13:23:15,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:15,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/874072aeac3142028a65d1c15d67cd7d is 50, key is test_row_0/A:col10/1732368194945/Put/seqid=0 2024-11-23T13:23:15,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742353_1529 (size=12301) 2024-11-23T13:23:16,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 827437fc31cfaf801e96764bfc0e4aaa 2024-11-23T13:23:16,069 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. as already flushing 2024-11-23T13:23:16,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-23T13:23:16,106 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:16,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368256102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:16,109 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:16,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368256104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:16,109 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:16,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368256105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:16,211 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:16,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368256207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:16,212 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:16,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368256210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:16,212 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:16,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368256210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:16,339 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=327 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/874072aeac3142028a65d1c15d67cd7d 2024-11-23T13:23:16,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/1b98fe69a2e5450cb82e0cf97c755259 is 50, key is test_row_0/B:col10/1732368194945/Put/seqid=0 2024-11-23T13:23:16,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742354_1530 (size=12301) 2024-11-23T13:23:16,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-23T13:23:16,415 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:16,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368256413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:16,416 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:16,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368256413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:16,416 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:16,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368256413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:16,720 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:16,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368256719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:16,720 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:16,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368256719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:16,721 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:16,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368256719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:16,750 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=327 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/1b98fe69a2e5450cb82e0cf97c755259 2024-11-23T13:23:16,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/23c9ec6ecfa040a692b6158c85eeb257 is 50, key is test_row_0/C:col10/1732368194945/Put/seqid=0 2024-11-23T13:23:16,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742355_1531 (size=12301) 2024-11-23T13:23:16,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-23T13:23:17,162 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=327 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/23c9ec6ecfa040a692b6158c85eeb257 2024-11-23T13:23:17,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/874072aeac3142028a65d1c15d67cd7d as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/874072aeac3142028a65d1c15d67cd7d 2024-11-23T13:23:17,170 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/874072aeac3142028a65d1c15d67cd7d, entries=150, sequenceid=327, filesize=12.0 K 2024-11-23T13:23:17,171 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/1b98fe69a2e5450cb82e0cf97c755259 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/1b98fe69a2e5450cb82e0cf97c755259 2024-11-23T13:23:17,174 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/1b98fe69a2e5450cb82e0cf97c755259, entries=150, sequenceid=327, filesize=12.0 K 2024-11-23T13:23:17,175 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/23c9ec6ecfa040a692b6158c85eeb257 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/23c9ec6ecfa040a692b6158c85eeb257 2024-11-23T13:23:17,178 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/23c9ec6ecfa040a692b6158c85eeb257, entries=150, sequenceid=327, filesize=12.0 K 2024-11-23T13:23:17,178 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 827437fc31cfaf801e96764bfc0e4aaa in 1249ms, sequenceid=327, compaction requested=true 2024-11-23T13:23:17,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2538): Flush status journal for 827437fc31cfaf801e96764bfc0e4aaa: 2024-11-23T13:23:17,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 
2024-11-23T13:23:17,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-11-23T13:23:17,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4106): Remote procedure done, pid=141 2024-11-23T13:23:17,181 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=141, resume processing ppid=140 2024-11-23T13:23:17,181 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, ppid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4020 sec 2024-11-23T13:23:17,182 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees in 1.4060 sec 2024-11-23T13:23:17,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 827437fc31cfaf801e96764bfc0e4aaa 2024-11-23T13:23:17,225 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 827437fc31cfaf801e96764bfc0e4aaa 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-23T13:23:17,225 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=A 2024-11-23T13:23:17,225 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:17,225 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=B 2024-11-23T13:23:17,225 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:17,225 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=C 2024-11-23T13:23:17,225 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:17,229 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/b78f0c2b37af4ac9bbeacdb7f2c30b6a is 50, key is test_row_0/A:col10/1732368197223/Put/seqid=0 2024-11-23T13:23:17,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742356_1532 (size=14741) 2024-11-23T13:23:17,250 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:17,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368257242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:17,250 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:17,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368257242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:17,256 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:17,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368257249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:17,354 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:17,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368257351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:17,354 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:17,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368257351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:17,360 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:17,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368257357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:17,560 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:17,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368257556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:17,561 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:17,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368257556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:17,565 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:17,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368257561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:17,633 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=350 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/b78f0c2b37af4ac9bbeacdb7f2c30b6a 2024-11-23T13:23:17,639 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/f2ea5c2f01d548ddbdc72276f6a7ba35 is 50, key is test_row_0/B:col10/1732368197223/Put/seqid=0 2024-11-23T13:23:17,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742357_1533 (size=12301) 2024-11-23T13:23:17,864 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:17,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368257861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:17,867 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:17,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368257864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:17,872 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:17,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368257867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:17,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-23T13:23:17,880 INFO [Thread-2064 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 140 completed 2024-11-23T13:23:17,881 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T13:23:17,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees 2024-11-23T13:23:17,883 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T13:23:17,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-23T13:23:17,883 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T13:23:17,884 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=143, ppid=142, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T13:23:17,900 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:17,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57562 deadline: 1732368257897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:17,901 DEBUG [Thread-2058 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8168 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa., hostname=ba2e440802a7,33173,1732368061317, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at 
org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T13:23:17,925 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:17,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57628 deadline: 1732368257919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:17,926 DEBUG [Thread-2060 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8193 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa., hostname=ba2e440802a7,33173,1732368061317, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T13:23:17,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-23T13:23:18,035 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:18,035 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-23T13:23:18,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:18,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. as already flushing 2024-11-23T13:23:18,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:18,036 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:18,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:18,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
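The entries above trace the flush path for this table: the test client asks the master to flush TestAcidGuarantees, the master stores a FlushTableProcedure (pid=142) and dispatches a FlushRegionProcedure (pid=143) to the region server, which declines with "NOT flushing ... as already flushing", so the remote procedure fails, is reported back to the master, and is re-dispatched further down in the log. A minimal sketch of issuing such a flush from a client, assuming a reachable cluster and using the standard HBase Admin API (the class name FlushSketch is illustrative, not part of the test):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; the master drives
      // this through FlushTableProcedure / FlushRegionProcedure as in the log above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}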
2024-11-23T13:23:18,043 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=350 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/f2ea5c2f01d548ddbdc72276f6a7ba35 2024-11-23T13:23:18,049 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/7cf95e61dbc54fe4957955868d391fcd is 50, key is test_row_0/C:col10/1732368197223/Put/seqid=0 2024-11-23T13:23:18,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742358_1534 (size=12301) 2024-11-23T13:23:18,054 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=350 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/7cf95e61dbc54fe4957955868d391fcd 2024-11-23T13:23:18,057 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/b78f0c2b37af4ac9bbeacdb7f2c30b6a as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/b78f0c2b37af4ac9bbeacdb7f2c30b6a 2024-11-23T13:23:18,060 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/b78f0c2b37af4ac9bbeacdb7f2c30b6a, entries=200, sequenceid=350, filesize=14.4 K 2024-11-23T13:23:18,060 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/f2ea5c2f01d548ddbdc72276f6a7ba35 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/f2ea5c2f01d548ddbdc72276f6a7ba35 2024-11-23T13:23:18,063 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/f2ea5c2f01d548ddbdc72276f6a7ba35, entries=150, sequenceid=350, filesize=12.0 K 2024-11-23T13:23:18,064 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/7cf95e61dbc54fe4957955868d391fcd as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/7cf95e61dbc54fe4957955868d391fcd 2024-11-23T13:23:18,066 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/7cf95e61dbc54fe4957955868d391fcd, entries=150, sequenceid=350, filesize=12.0 K 2024-11-23T13:23:18,067 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 827437fc31cfaf801e96764bfc0e4aaa in 842ms, sequenceid=350, compaction requested=true 2024-11-23T13:23:18,067 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 827437fc31cfaf801e96764bfc0e4aaa: 2024-11-23T13:23:18,067 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 827437fc31cfaf801e96764bfc0e4aaa:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T13:23:18,067 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:23:18,067 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T13:23:18,067 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 827437fc31cfaf801e96764bfc0e4aaa:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T13:23:18,067 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:23:18,067 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T13:23:18,067 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 827437fc31cfaf801e96764bfc0e4aaa:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T13:23:18,067 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:23:18,068 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 52326 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T13:23:18,068 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49886 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T13:23:18,068 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): 827437fc31cfaf801e96764bfc0e4aaa/A is initiating minor compaction (all files) 2024-11-23T13:23:18,068 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 827437fc31cfaf801e96764bfc0e4aaa/A in TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 
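The flush just above writes ~134 KB across stores A/B/C at sequenceid=350 and queues minor compactions for all three, while the writer threads keep hitting RegionTooBusyException because the region's memstore is over its blocking limit (512.0 K in this run). In HBase that blocking limit is the memstore flush size multiplied by the block multiplier, so the small 512 K value suggests the test configures a tiny flush size. A short sketch, assuming the standard configuration keys, of how that threshold is derived (the defaults printed here are not the values used in this run):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Writes are rejected with RegionTooBusyException once a region's memstore
    // exceeds flush size * block multiplier (the "Over memstore limit" above).
    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);
    System.out.println("blocking memstore size = " + (flushSize * multiplier) + " bytes");
  }
}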
2024-11-23T13:23:18,068 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 827437fc31cfaf801e96764bfc0e4aaa/B is initiating minor compaction (all files) 2024-11-23T13:23:18,068 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 827437fc31cfaf801e96764bfc0e4aaa/B in TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:18,068 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/1e64a02bdbbd4f27ad28248c58cd739a, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/7b0f05ae2e194b4d90c499765429a71f, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/874072aeac3142028a65d1c15d67cd7d, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/b78f0c2b37af4ac9bbeacdb7f2c30b6a] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp, totalSize=51.1 K 2024-11-23T13:23:18,069 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/9ce1709ad32c481d8b38336c792602de, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/9488d869813c473ebbb64e77cdc9d1d6, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/1b98fe69a2e5450cb82e0cf97c755259, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/f2ea5c2f01d548ddbdc72276f6a7ba35] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp, totalSize=48.7 K 2024-11-23T13:23:18,069 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 9ce1709ad32c481d8b38336c792602de, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732368192651 2024-11-23T13:23:18,069 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1e64a02bdbbd4f27ad28248c58cd739a, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732368192651 2024-11-23T13:23:18,069 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7b0f05ae2e194b4d90c499765429a71f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1732368193807 2024-11-23T13:23:18,069 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 9488d869813c473ebbb64e77cdc9d1d6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=312, 
earliestPutTs=1732368193807 2024-11-23T13:23:18,069 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 1b98fe69a2e5450cb82e0cf97c755259, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=327, earliestPutTs=1732368194945 2024-11-23T13:23:18,069 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 874072aeac3142028a65d1c15d67cd7d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=327, earliestPutTs=1732368194945 2024-11-23T13:23:18,070 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting f2ea5c2f01d548ddbdc72276f6a7ba35, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=350, earliestPutTs=1732368196097 2024-11-23T13:23:18,070 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting b78f0c2b37af4ac9bbeacdb7f2c30b6a, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=350, earliestPutTs=1732368196097 2024-11-23T13:23:18,078 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 827437fc31cfaf801e96764bfc0e4aaa#A#compaction#458 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:23:18,078 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/fd33f95ab6bd4d9b90ab3c38053eb702 is 50, key is test_row_0/A:col10/1732368197223/Put/seqid=0 2024-11-23T13:23:18,078 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 827437fc31cfaf801e96764bfc0e4aaa#B#compaction#459 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:23:18,079 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/1cabcc1fc1664cce8f77262a44df581c is 50, key is test_row_0/B:col10/1732368197223/Put/seqid=0 2024-11-23T13:23:18,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742360_1536 (size=13119) 2024-11-23T13:23:18,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742359_1535 (size=13119) 2024-11-23T13:23:18,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-23T13:23:18,188 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:18,188 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-23T13:23:18,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:18,188 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2837): Flushing 827437fc31cfaf801e96764bfc0e4aaa 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-23T13:23:18,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=A 2024-11-23T13:23:18,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:18,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=B 2024-11-23T13:23:18,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:18,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=C 2024-11-23T13:23:18,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:18,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/b2c0bab5a79b467e81c8441502af26ae is 50, key is 
test_row_0/A:col10/1732368197240/Put/seqid=0 2024-11-23T13:23:18,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742361_1537 (size=12301) 2024-11-23T13:23:18,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 827437fc31cfaf801e96764bfc0e4aaa 2024-11-23T13:23:18,371 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. as already flushing 2024-11-23T13:23:18,422 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:18,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368258413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:18,425 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:18,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368258419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:18,430 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:18,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368258421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:18,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-23T13:23:18,496 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/fd33f95ab6bd4d9b90ab3c38053eb702 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/fd33f95ab6bd4d9b90ab3c38053eb702 2024-11-23T13:23:18,496 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/1cabcc1fc1664cce8f77262a44df581c as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/1cabcc1fc1664cce8f77262a44df581c 2024-11-23T13:23:18,500 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 827437fc31cfaf801e96764bfc0e4aaa/B of 827437fc31cfaf801e96764bfc0e4aaa into 1cabcc1fc1664cce8f77262a44df581c(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:23:18,500 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 827437fc31cfaf801e96764bfc0e4aaa/A of 827437fc31cfaf801e96764bfc0e4aaa into fd33f95ab6bd4d9b90ab3c38053eb702(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T13:23:18,500 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 827437fc31cfaf801e96764bfc0e4aaa: 2024-11-23T13:23:18,500 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa., storeName=827437fc31cfaf801e96764bfc0e4aaa/B, priority=12, startTime=1732368198067; duration=0sec 2024-11-23T13:23:18,500 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:23:18,500 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 827437fc31cfaf801e96764bfc0e4aaa:B 2024-11-23T13:23:18,500 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-23T13:23:18,500 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 827437fc31cfaf801e96764bfc0e4aaa: 2024-11-23T13:23:18,500 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa., storeName=827437fc31cfaf801e96764bfc0e4aaa/A, priority=12, startTime=1732368198067; duration=0sec 2024-11-23T13:23:18,500 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:23:18,500 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 827437fc31cfaf801e96764bfc0e4aaa:A 2024-11-23T13:23:18,501 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49886 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-23T13:23:18,501 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 827437fc31cfaf801e96764bfc0e4aaa/C is initiating minor compaction (all files) 2024-11-23T13:23:18,501 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 827437fc31cfaf801e96764bfc0e4aaa/C in TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 
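The writers producing the RegionTooBusyException entries are the AcidGuaranteesTestTool atomicity writers visible in the client-side stack traces: each issues Table.put against TestAcidGuarantees and relies on the client's RpcRetryingCallerImpl to back off and retry (the "tries=7, retries=16" messages). A minimal sketch of that write path, assuming a running cluster; the row, family, and qualifier match the ones in the log, while the retry setting and cell value are only illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class WriterSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Client-side retry budget; the exact value behind "retries=16" in this run
    // is not shown in the log, so this is just an example setting.
    conf.setInt("hbase.client.retries.number", 16);
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // Table.put goes through RpcRetryingCallerImpl, which backs off and retries
      // when the server answers with RegionTooBusyException, as in the entries above.
      table.put(put);
    }
  }
}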
2024-11-23T13:23:18,501 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/49286b299f6b4704bb5951a02c94934c, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/8c51fe8532844a63a1fcba42ee745590, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/23c9ec6ecfa040a692b6158c85eeb257, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/7cf95e61dbc54fe4957955868d391fcd] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp, totalSize=48.7 K 2024-11-23T13:23:18,502 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 49286b299f6b4704bb5951a02c94934c, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732368192651 2024-11-23T13:23:18,502 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 8c51fe8532844a63a1fcba42ee745590, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1732368193807 2024-11-23T13:23:18,502 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 23c9ec6ecfa040a692b6158c85eeb257, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=327, earliestPutTs=1732368194945 2024-11-23T13:23:18,502 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 7cf95e61dbc54fe4957955868d391fcd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=350, earliestPutTs=1732368196097 2024-11-23T13:23:18,510 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 827437fc31cfaf801e96764bfc0e4aaa#C#compaction#461 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:23:18,511 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/d061da3654ed43f8b98480649807bfac is 50, key is test_row_0/C:col10/1732368197223/Put/seqid=0 2024-11-23T13:23:18,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742362_1538 (size=13119) 2024-11-23T13:23:18,529 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:18,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368258524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:18,530 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:18,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368258526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:18,538 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:18,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368258531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:18,598 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=363 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/b2c0bab5a79b467e81c8441502af26ae 2024-11-23T13:23:18,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/406532d54fda43479d8ba891c23c88a0 is 50, key is test_row_0/B:col10/1732368197240/Put/seqid=0 2024-11-23T13:23:18,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742363_1539 (size=12301) 2024-11-23T13:23:18,731 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:18,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368258731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:18,735 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:18,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368258732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:18,741 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:18,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368258739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:18,919 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/d061da3654ed43f8b98480649807bfac as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/d061da3654ed43f8b98480649807bfac 2024-11-23T13:23:18,923 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 827437fc31cfaf801e96764bfc0e4aaa/C of 827437fc31cfaf801e96764bfc0e4aaa into d061da3654ed43f8b98480649807bfac(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
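[Editorial note, not part of the captured log] The repeated RegionTooBusyException warnings above are memstore back-pressure: writes are rejected while the region's memstore is over its blocking limit (here 512.0 K), which in HBase is the memstore flush size multiplied by the block multiplier; this test deliberately configures a small limit to force the condition. The sketch below is illustrative only, showing the two configuration keys involved and a simple client-side back-off; the specific values and the retry policy are assumptions, and in practice the HBase client may also retry internally before surfacing the exception:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithBackoff {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Blocking threshold = flush size * block multiplier.
    // Illustrative values only; not the settings this test actually uses.
    conf.setLong("hbase.hregion.memstore.flush.size", 256 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 2);

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      for (int attempt = 0; ; attempt++) {
        try {
          table.put(put);
          break;                                        // write accepted
        } catch (RegionTooBusyException busy) {
          // Region is over its memstore limit; wait for the flush to drain it, then retry.
          Thread.sleep(Math.min(500L * (attempt + 1), 5_000L));
        }
      }
    }
  }
}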
2024-11-23T13:23:18,923 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 827437fc31cfaf801e96764bfc0e4aaa: 2024-11-23T13:23:18,923 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa., storeName=827437fc31cfaf801e96764bfc0e4aaa/C, priority=12, startTime=1732368198067; duration=0sec 2024-11-23T13:23:18,923 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:23:18,923 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 827437fc31cfaf801e96764bfc0e4aaa:C 2024-11-23T13:23:18,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-23T13:23:19,013 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=363 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/406532d54fda43479d8ba891c23c88a0 2024-11-23T13:23:19,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/7753fb79a5464667a16a872b20b80e64 is 50, key is test_row_0/C:col10/1732368197240/Put/seqid=0 2024-11-23T13:23:19,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742364_1540 (size=12301) 2024-11-23T13:23:19,024 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=363 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/7753fb79a5464667a16a872b20b80e64 2024-11-23T13:23:19,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/b2c0bab5a79b467e81c8441502af26ae as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/b2c0bab5a79b467e81c8441502af26ae 2024-11-23T13:23:19,030 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/b2c0bab5a79b467e81c8441502af26ae, entries=150, sequenceid=363, filesize=12.0 K 2024-11-23T13:23:19,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 
{event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/406532d54fda43479d8ba891c23c88a0 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/406532d54fda43479d8ba891c23c88a0 2024-11-23T13:23:19,034 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/406532d54fda43479d8ba891c23c88a0, entries=150, sequenceid=363, filesize=12.0 K 2024-11-23T13:23:19,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/7753fb79a5464667a16a872b20b80e64 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/7753fb79a5464667a16a872b20b80e64 2024-11-23T13:23:19,036 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:19,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368259032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:19,037 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/7753fb79a5464667a16a872b20b80e64, entries=150, sequenceid=363, filesize=12.0 K 2024-11-23T13:23:19,038 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 827437fc31cfaf801e96764bfc0e4aaa in 850ms, sequenceid=363, compaction requested=false 2024-11-23T13:23:19,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2538): Flush status journal for 827437fc31cfaf801e96764bfc0e4aaa: 2024-11-23T13:23:19,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 
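[Editorial note, not part of the captured log] The flush that just completed (pid=143) was driven by a master-side FlushTableProcedure that fans out one FlushRegionProcedure per region, each writing one HFile per store (A, B, C) as seen above. A client or test triggers this path through the Admin API; the following is a minimal sketch under the same assumptions as the earlier examples (running cluster, config on the classpath):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush the table; the master runs a FlushTableProcedure
      // and each region server flushes its memstores to new store files.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}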
2024-11-23T13:23:19,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=143 2024-11-23T13:23:19,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4106): Remote procedure done, pid=143 2024-11-23T13:23:19,040 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=143, resume processing ppid=142 2024-11-23T13:23:19,040 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, ppid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1550 sec 2024-11-23T13:23:19,041 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees in 1.1600 sec 2024-11-23T13:23:19,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 827437fc31cfaf801e96764bfc0e4aaa 2024-11-23T13:23:19,043 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 827437fc31cfaf801e96764bfc0e4aaa 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-23T13:23:19,043 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=A 2024-11-23T13:23:19,043 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:19,043 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=B 2024-11-23T13:23:19,043 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:19,043 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=C 2024-11-23T13:23:19,043 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:19,047 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/b027f26cc6d840799007ea2bb4f29beb is 50, key is test_row_0/A:col10/1732368198420/Put/seqid=0 2024-11-23T13:23:19,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742365_1541 (size=14741) 2024-11-23T13:23:19,070 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:19,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368259065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:19,074 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:19,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368259070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:19,175 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:19,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368259171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:19,179 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:19,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368259176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:19,378 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:19,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368259376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:19,384 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:19,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368259381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:19,451 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=390 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/b027f26cc6d840799007ea2bb4f29beb 2024-11-23T13:23:19,457 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/9f37f214336740b7b14e703f40ae5354 is 50, key is test_row_0/B:col10/1732368198420/Put/seqid=0 2024-11-23T13:23:19,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742366_1542 (size=12301) 2024-11-23T13:23:19,539 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:19,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368259537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:19,684 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:19,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368259679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:19,692 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:19,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368259686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:19,861 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=390 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/9f37f214336740b7b14e703f40ae5354 2024-11-23T13:23:19,868 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/78e779f613b043bcafee0e67c94f7836 is 50, key is test_row_0/C:col10/1732368198420/Put/seqid=0 2024-11-23T13:23:19,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742367_1543 (size=12301) 2024-11-23T13:23:19,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-23T13:23:19,987 INFO [Thread-2064 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 142 completed 2024-11-23T13:23:19,988 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T13:23:19,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=144, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees 2024-11-23T13:23:19,990 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=144, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T13:23:19,990 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=144, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees execute 
state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T13:23:19,990 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=145, ppid=144, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T13:23:19,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-23T13:23:20,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-23T13:23:20,142 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:20,142 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-23T13:23:20,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:20,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. as already flushing 2024-11-23T13:23:20,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:20,143 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:20,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:20,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:20,189 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:20,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57606 deadline: 1732368260186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:20,198 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:20,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57596 deadline: 1732368260195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:20,272 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=390 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/78e779f613b043bcafee0e67c94f7836 2024-11-23T13:23:20,275 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/b027f26cc6d840799007ea2bb4f29beb as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/b027f26cc6d840799007ea2bb4f29beb 2024-11-23T13:23:20,281 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/b027f26cc6d840799007ea2bb4f29beb, entries=200, sequenceid=390, filesize=14.4 K 2024-11-23T13:23:20,282 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/9f37f214336740b7b14e703f40ae5354 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/9f37f214336740b7b14e703f40ae5354 2024-11-23T13:23:20,285 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/9f37f214336740b7b14e703f40ae5354, entries=150, sequenceid=390, filesize=12.0 K 2024-11-23T13:23:20,286 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/78e779f613b043bcafee0e67c94f7836 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/78e779f613b043bcafee0e67c94f7836 2024-11-23T13:23:20,289 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/78e779f613b043bcafee0e67c94f7836, entries=150, sequenceid=390, filesize=12.0 K 2024-11-23T13:23:20,289 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 827437fc31cfaf801e96764bfc0e4aaa in 1246ms, sequenceid=390, compaction requested=true 2024-11-23T13:23:20,290 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 827437fc31cfaf801e96764bfc0e4aaa: 2024-11-23T13:23:20,290 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 827437fc31cfaf801e96764bfc0e4aaa:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T13:23:20,290 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:23:20,290 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 827437fc31cfaf801e96764bfc0e4aaa:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T13:23:20,290 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:23:20,290 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 827437fc31cfaf801e96764bfc0e4aaa:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T13:23:20,290 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:23:20,290 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:23:20,290 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:23:20,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-23T13:23:20,292 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40161 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:23:20,292 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:23:20,292 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 827437fc31cfaf801e96764bfc0e4aaa/B is initiating minor compaction (all files) 2024-11-23T13:23:20,292 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): 827437fc31cfaf801e96764bfc0e4aaa/A is initiating minor compaction (all files) 2024-11-23T13:23:20,292 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 827437fc31cfaf801e96764bfc0e4aaa/A in 
TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:20,292 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 827437fc31cfaf801e96764bfc0e4aaa/B in TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:20,293 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/fd33f95ab6bd4d9b90ab3c38053eb702, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/b2c0bab5a79b467e81c8441502af26ae, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/b027f26cc6d840799007ea2bb4f29beb] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp, totalSize=39.2 K 2024-11-23T13:23:20,293 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/1cabcc1fc1664cce8f77262a44df581c, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/406532d54fda43479d8ba891c23c88a0, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/9f37f214336740b7b14e703f40ae5354] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp, totalSize=36.8 K 2024-11-23T13:23:20,293 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting fd33f95ab6bd4d9b90ab3c38053eb702, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=350, earliestPutTs=1732368196097 2024-11-23T13:23:20,293 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 1cabcc1fc1664cce8f77262a44df581c, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=350, earliestPutTs=1732368196097 2024-11-23T13:23:20,293 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting b2c0bab5a79b467e81c8441502af26ae, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=363, earliestPutTs=1732368197240 2024-11-23T13:23:20,293 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 406532d54fda43479d8ba891c23c88a0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=363, earliestPutTs=1732368197240 2024-11-23T13:23:20,294 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting b027f26cc6d840799007ea2bb4f29beb, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=390, earliestPutTs=1732368198412 2024-11-23T13:23:20,294 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 9f37f214336740b7b14e703f40ae5354, keycount=150, bloomtype=ROW, size=12.0 K, 
encoding=NONE, compression=NONE, seqNum=390, earliestPutTs=1732368198413 2024-11-23T13:23:20,295 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:20,295 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-23T13:23:20,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:20,295 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2837): Flushing 827437fc31cfaf801e96764bfc0e4aaa 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-23T13:23:20,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=A 2024-11-23T13:23:20,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:20,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=B 2024-11-23T13:23:20,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:20,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=C 2024-11-23T13:23:20,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:20,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/5056496822924c3985379e50d3f74af1 is 50, key is test_row_0/A:col10/1732368199064/Put/seqid=0 2024-11-23T13:23:20,300 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 827437fc31cfaf801e96764bfc0e4aaa#A#compaction#468 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:23:20,301 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/04af2291779c4662a0d23ca9d6bba0c2 is 50, key is test_row_0/A:col10/1732368198420/Put/seqid=0 2024-11-23T13:23:20,301 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 827437fc31cfaf801e96764bfc0e4aaa#B#compaction#469 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:23:20,301 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/5e8e33831cb640039480a98cfa8cbe18 is 50, key is test_row_0/B:col10/1732368198420/Put/seqid=0 2024-11-23T13:23:20,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742368_1544 (size=12301) 2024-11-23T13:23:20,305 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=402 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/5056496822924c3985379e50d3f74af1 2024-11-23T13:23:20,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742369_1545 (size=13221) 2024-11-23T13:23:20,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742370_1546 (size=13221) 2024-11-23T13:23:20,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/17b2c9f4ecc1479dbe7b9f7d62e8fb87 is 50, key is test_row_0/B:col10/1732368199064/Put/seqid=0 2024-11-23T13:23:20,319 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/5e8e33831cb640039480a98cfa8cbe18 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/5e8e33831cb640039480a98cfa8cbe18 2024-11-23T13:23:20,319 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/04af2291779c4662a0d23ca9d6bba0c2 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/04af2291779c4662a0d23ca9d6bba0c2 2024-11-23T13:23:20,321 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742371_1547 (size=12301) 2024-11-23T13:23:20,323 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=402 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/17b2c9f4ecc1479dbe7b9f7d62e8fb87 2024-11-23T13:23:20,324 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 827437fc31cfaf801e96764bfc0e4aaa/B of 827437fc31cfaf801e96764bfc0e4aaa into 5e8e33831cb640039480a98cfa8cbe18(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:23:20,324 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 827437fc31cfaf801e96764bfc0e4aaa: 2024-11-23T13:23:20,324 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa., storeName=827437fc31cfaf801e96764bfc0e4aaa/B, priority=13, startTime=1732368200290; duration=0sec 2024-11-23T13:23:20,324 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:23:20,325 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 827437fc31cfaf801e96764bfc0e4aaa:B 2024-11-23T13:23:20,325 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:23:20,326 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 827437fc31cfaf801e96764bfc0e4aaa/A of 827437fc31cfaf801e96764bfc0e4aaa into 04af2291779c4662a0d23ca9d6bba0c2(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
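The recurring RegionTooBusyException entries in this run ("Over memstore limit=512.0 K", thrown from HRegion.checkResources in the stack traces) fire once a region's memstore reaches its blocking limit, which HBase computes as the per-region flush size multiplied by the block multiplier. A minimal sketch of that arithmetic, assuming the test lowered hbase.hregion.memstore.flush.size to 128 KB to force frequent flushes; the log only shows the resulting 512.0 K limit, so the exact override is an assumption:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimit {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed test override: flush each region's memstore at 128 KB instead of the 128 MB default.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        // Default multiplier: writes are rejected with RegionTooBusyException once the
        // memstore grows past flushSize * multiplier.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = flushSize * multiplier;
        System.out.println("Blocking memstore limit: " + blockingLimit + " bytes"); // 524288 = 512.0 K
      }
    }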
2024-11-23T13:23:20,326 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 827437fc31cfaf801e96764bfc0e4aaa: 2024-11-23T13:23:20,326 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa., storeName=827437fc31cfaf801e96764bfc0e4aaa/A, priority=13, startTime=1732368200290; duration=0sec 2024-11-23T13:23:20,326 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:23:20,326 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 827437fc31cfaf801e96764bfc0e4aaa:A 2024-11-23T13:23:20,326 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:23:20,326 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 827437fc31cfaf801e96764bfc0e4aaa/C is initiating minor compaction (all files) 2024-11-23T13:23:20,327 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 827437fc31cfaf801e96764bfc0e4aaa/C in TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:20,327 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/d061da3654ed43f8b98480649807bfac, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/7753fb79a5464667a16a872b20b80e64, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/78e779f613b043bcafee0e67c94f7836] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp, totalSize=36.8 K 2024-11-23T13:23:20,327 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting d061da3654ed43f8b98480649807bfac, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=350, earliestPutTs=1732368196097 2024-11-23T13:23:20,327 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 7753fb79a5464667a16a872b20b80e64, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=363, earliestPutTs=1732368197240 2024-11-23T13:23:20,328 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 78e779f613b043bcafee0e67c94f7836, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=390, earliestPutTs=1732368198413 2024-11-23T13:23:20,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/ec61ad8eee394b1ea9ced9142aeccbc1 is 50, key is test_row_0/C:col10/1732368199064/Put/seqid=0 2024-11-23T13:23:20,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742372_1548 (size=12301) 2024-11-23T13:23:20,335 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 827437fc31cfaf801e96764bfc0e4aaa#C#compaction#472 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:23:20,335 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/1be67d4d29af49d9958bf60b1a051669 is 50, key is test_row_0/C:col10/1732368198420/Put/seqid=0 2024-11-23T13:23:20,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742373_1549 (size=13221) 2024-11-23T13:23:20,347 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/1be67d4d29af49d9958bf60b1a051669 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/1be67d4d29af49d9958bf60b1a051669 2024-11-23T13:23:20,353 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 827437fc31cfaf801e96764bfc0e4aaa/C of 827437fc31cfaf801e96764bfc0e4aaa into 1be67d4d29af49d9958bf60b1a051669(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:23:20,353 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 827437fc31cfaf801e96764bfc0e4aaa: 2024-11-23T13:23:20,353 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa., storeName=827437fc31cfaf801e96764bfc0e4aaa/C, priority=13, startTime=1732368200290; duration=0sec 2024-11-23T13:23:20,353 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:23:20,353 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 827437fc31cfaf801e96764bfc0e4aaa:C 2024-11-23T13:23:20,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 827437fc31cfaf801e96764bfc0e4aaa 2024-11-23T13:23:20,545 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 
as already flushing 2024-11-23T13:23:20,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-23T13:23:20,639 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:20,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368260636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:20,734 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=402 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/ec61ad8eee394b1ea9ced9142aeccbc1 2024-11-23T13:23:20,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/5056496822924c3985379e50d3f74af1 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/5056496822924c3985379e50d3f74af1 2024-11-23T13:23:20,741 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/5056496822924c3985379e50d3f74af1, entries=150, sequenceid=402, filesize=12.0 K 2024-11-23T13:23:20,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 
{event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/17b2c9f4ecc1479dbe7b9f7d62e8fb87 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/17b2c9f4ecc1479dbe7b9f7d62e8fb87 2024-11-23T13:23:20,745 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:20,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57626 deadline: 1732368260742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:20,745 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/17b2c9f4ecc1479dbe7b9f7d62e8fb87, entries=150, sequenceid=402, filesize=12.0 K 2024-11-23T13:23:20,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/ec61ad8eee394b1ea9ced9142aeccbc1 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/ec61ad8eee394b1ea9ced9142aeccbc1 2024-11-23T13:23:20,749 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/ec61ad8eee394b1ea9ced9142aeccbc1, entries=150, sequenceid=402, filesize=12.0 K 2024-11-23T13:23:20,750 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 827437fc31cfaf801e96764bfc0e4aaa in 455ms, sequenceid=402, compaction requested=false 2024-11-23T13:23:20,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2538): Flush status journal for 827437fc31cfaf801e96764bfc0e4aaa: 2024-11-23T13:23:20,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:20,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=145 2024-11-23T13:23:20,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4106): Remote procedure done, pid=145 2024-11-23T13:23:20,752 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=145, resume processing ppid=144 2024-11-23T13:23:20,752 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, ppid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 761 msec 2024-11-23T13:23:20,757 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees in 768 msec 2024-11-23T13:23:20,836 DEBUG [Thread-2073 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2405c04e to 127.0.0.1:51875 2024-11-23T13:23:20,837 DEBUG [Thread-2073 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:23:20,837 DEBUG [Thread-2067 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7cf40102 to 127.0.0.1:51875 2024-11-23T13:23:20,837 DEBUG [Thread-2067 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:23:20,838 DEBUG [Thread-2071 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3652e74d to 127.0.0.1:51875 2024-11-23T13:23:20,838 DEBUG [Thread-2071 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:23:20,839 DEBUG [Thread-2069 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x496fe03f to 127.0.0.1:51875 2024-11-23T13:23:20,839 DEBUG [Thread-2069 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:23:20,840 DEBUG [Thread-2065 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3d672ed2 to 127.0.0.1:51875 2024-11-23T13:23:20,840 DEBUG [Thread-2065 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:23:20,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 827437fc31cfaf801e96764bfc0e4aaa 2024-11-23T13:23:20,947 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 827437fc31cfaf801e96764bfc0e4aaa 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-23T13:23:20,947 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): 
FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=A 2024-11-23T13:23:20,947 DEBUG [Thread-2054 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x79b10416 to 127.0.0.1:51875 2024-11-23T13:23:20,947 DEBUG [Thread-2054 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:23:20,947 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:20,947 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=B 2024-11-23T13:23:20,947 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:20,947 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=C 2024-11-23T13:23:20,947 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:20,951 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/6b73af59fc1045b6875015bce6c4010d is 50, key is test_row_0/A:col10/1732368200635/Put/seqid=0 2024-11-23T13:23:20,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742374_1550 (size=12301) 2024-11-23T13:23:21,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-23T13:23:21,094 INFO [Thread-2064 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 144 completed 2024-11-23T13:23:21,194 DEBUG [Thread-2056 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2f142b04 to 127.0.0.1:51875 2024-11-23T13:23:21,194 DEBUG [Thread-2056 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:23:21,208 DEBUG [Thread-2062 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7ed69825 to 127.0.0.1:51875 2024-11-23T13:23:21,208 DEBUG [Thread-2062 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:23:21,354 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=430 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/6b73af59fc1045b6875015bce6c4010d 2024-11-23T13:23:21,360 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/a241e05ca6474c3884da99925e9fa6e9 is 50, key is test_row_0/B:col10/1732368200635/Put/seqid=0 2024-11-23T13:23:21,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742375_1551 (size=12301) 2024-11-23T13:23:21,764 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=430 (bloomFilter=true), 
to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/a241e05ca6474c3884da99925e9fa6e9 2024-11-23T13:23:21,770 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/5210021fafd44e5dae735c8b5413031f is 50, key is test_row_0/C:col10/1732368200635/Put/seqid=0 2024-11-23T13:23:21,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742376_1552 (size=12301) 2024-11-23T13:23:22,173 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=430 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/5210021fafd44e5dae735c8b5413031f 2024-11-23T13:23:22,177 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/6b73af59fc1045b6875015bce6c4010d as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/6b73af59fc1045b6875015bce6c4010d 2024-11-23T13:23:22,179 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/6b73af59fc1045b6875015bce6c4010d, entries=150, sequenceid=430, filesize=12.0 K 2024-11-23T13:23:22,180 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/a241e05ca6474c3884da99925e9fa6e9 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/a241e05ca6474c3884da99925e9fa6e9 2024-11-23T13:23:22,182 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/a241e05ca6474c3884da99925e9fa6e9, entries=150, sequenceid=430, filesize=12.0 K 2024-11-23T13:23:22,183 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/5210021fafd44e5dae735c8b5413031f as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/5210021fafd44e5dae735c8b5413031f 2024-11-23T13:23:22,185 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/5210021fafd44e5dae735c8b5413031f, entries=150, sequenceid=430, filesize=12.0 K 2024-11-23T13:23:22,186 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=13.42 KB/13740 for 827437fc31cfaf801e96764bfc0e4aaa in 1239ms, sequenceid=430, compaction requested=true 2024-11-23T13:23:22,186 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 827437fc31cfaf801e96764bfc0e4aaa: 2024-11-23T13:23:22,186 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 827437fc31cfaf801e96764bfc0e4aaa:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T13:23:22,186 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:23:22,186 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 827437fc31cfaf801e96764bfc0e4aaa:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T13:23:22,186 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:23:22,186 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 827437fc31cfaf801e96764bfc0e4aaa:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T13:23:22,186 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:23:22,186 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:23:22,186 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:23:22,187 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:23:22,187 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:23:22,187 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 827437fc31cfaf801e96764bfc0e4aaa/B is initiating minor compaction (all files) 2024-11-23T13:23:22,187 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 827437fc31cfaf801e96764bfc0e4aaa/B in TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 
2024-11-23T13:23:22,187 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): 827437fc31cfaf801e96764bfc0e4aaa/A is initiating minor compaction (all files) 2024-11-23T13:23:22,187 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/5e8e33831cb640039480a98cfa8cbe18, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/17b2c9f4ecc1479dbe7b9f7d62e8fb87, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/a241e05ca6474c3884da99925e9fa6e9] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp, totalSize=36.9 K 2024-11-23T13:23:22,187 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 827437fc31cfaf801e96764bfc0e4aaa/A in TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:22,187 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/04af2291779c4662a0d23ca9d6bba0c2, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/5056496822924c3985379e50d3f74af1, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/6b73af59fc1045b6875015bce6c4010d] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp, totalSize=36.9 K 2024-11-23T13:23:22,187 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 5e8e33831cb640039480a98cfa8cbe18, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=390, earliestPutTs=1732368198413 2024-11-23T13:23:22,187 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 04af2291779c4662a0d23ca9d6bba0c2, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=390, earliestPutTs=1732368198413 2024-11-23T13:23:22,187 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 17b2c9f4ecc1479dbe7b9f7d62e8fb87, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=402, earliestPutTs=1732368199064 2024-11-23T13:23:22,188 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting a241e05ca6474c3884da99925e9fa6e9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=430, earliestPutTs=1732368200631 2024-11-23T13:23:22,188 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5056496822924c3985379e50d3f74af1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=402, earliestPutTs=1732368199064 2024-11-23T13:23:22,188 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting 6b73af59fc1045b6875015bce6c4010d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=430, earliestPutTs=1732368200631 2024-11-23T13:23:22,192 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 827437fc31cfaf801e96764bfc0e4aaa#B#compaction#476 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:23:22,193 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/9a786c9154434c52bf18e3b60a2e3832 is 50, key is test_row_0/B:col10/1732368200635/Put/seqid=0 2024-11-23T13:23:22,194 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 827437fc31cfaf801e96764bfc0e4aaa#A#compaction#477 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:23:22,195 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/3d9b11d836be4a3d91870a6c8064f0f8 is 50, key is test_row_0/A:col10/1732368200635/Put/seqid=0 2024-11-23T13:23:22,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742377_1553 (size=13323) 2024-11-23T13:23:22,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742378_1554 (size=13323) 2024-11-23T13:23:22,600 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/9a786c9154434c52bf18e3b60a2e3832 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/9a786c9154434c52bf18e3b60a2e3832 2024-11-23T13:23:22,601 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/3d9b11d836be4a3d91870a6c8064f0f8 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/3d9b11d836be4a3d91870a6c8064f0f8 2024-11-23T13:23:22,603 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 827437fc31cfaf801e96764bfc0e4aaa/B of 827437fc31cfaf801e96764bfc0e4aaa into 9a786c9154434c52bf18e3b60a2e3832(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
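The repeated "Exploring compaction algorithm has selected 3 files ... with 1 in ratio" entries come from HBase's default store-file selection, which keeps a candidate set only if every file is no larger than the combined size of the other files times a configurable ratio (hbase.hstore.compaction.ratio, default 1.2). A simplified illustration of that size-ratio test, not the actual ExploringCompactionPolicy code, using sizes like the selection logged above (total 37823 bytes):

    import java.util.Arrays;
    import java.util.List;

    public class SizeRatioCheck {
      // True if every file is at most (sum of the other files) * ratio -- the shape of the
      // "in ratio" test the log refers to; a simplified sketch, not HBase's own implementation.
      static boolean filesInRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
          if (size > (total - size) * ratio) {
            return false;
          }
        }
        return true;
      }

      public static void main(String[] args) {
        // Store-file sizes resembling the A-store candidates above (12.9 K + 12.0 K + 12.0 K = 37823 bytes).
        List<Long> candidate = Arrays.asList(13221L, 12301L, 12301L);
        System.out.println(filesInRatio(candidate, 1.2)); // true, so all three files are compacted together
      }
    }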
2024-11-23T13:23:22,603 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 827437fc31cfaf801e96764bfc0e4aaa: 2024-11-23T13:23:22,603 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa., storeName=827437fc31cfaf801e96764bfc0e4aaa/B, priority=13, startTime=1732368202186; duration=0sec 2024-11-23T13:23:22,603 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 827437fc31cfaf801e96764bfc0e4aaa/A of 827437fc31cfaf801e96764bfc0e4aaa into 3d9b11d836be4a3d91870a6c8064f0f8(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:23:22,603 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:23:22,603 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 827437fc31cfaf801e96764bfc0e4aaa: 2024-11-23T13:23:22,604 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 827437fc31cfaf801e96764bfc0e4aaa:B 2024-11-23T13:23:22,604 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa., storeName=827437fc31cfaf801e96764bfc0e4aaa/A, priority=13, startTime=1732368202186; duration=0sec 2024-11-23T13:23:22,604 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:23:22,604 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:23:22,604 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 827437fc31cfaf801e96764bfc0e4aaa:A 2024-11-23T13:23:22,604 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:23:22,604 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 827437fc31cfaf801e96764bfc0e4aaa/C is initiating minor compaction (all files) 2024-11-23T13:23:22,604 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 827437fc31cfaf801e96764bfc0e4aaa/C in TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 
2024-11-23T13:23:22,604 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/1be67d4d29af49d9958bf60b1a051669, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/ec61ad8eee394b1ea9ced9142aeccbc1, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/5210021fafd44e5dae735c8b5413031f] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp, totalSize=36.9 K 2024-11-23T13:23:22,605 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 1be67d4d29af49d9958bf60b1a051669, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=390, earliestPutTs=1732368198413 2024-11-23T13:23:22,605 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting ec61ad8eee394b1ea9ced9142aeccbc1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=402, earliestPutTs=1732368199064 2024-11-23T13:23:22,605 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 5210021fafd44e5dae735c8b5413031f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=430, earliestPutTs=1732368200631 2024-11-23T13:23:22,610 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 827437fc31cfaf801e96764bfc0e4aaa#C#compaction#478 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:23:22,610 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/abd472a155c94656a1eaedc3308c1022 is 50, key is test_row_0/C:col10/1732368200635/Put/seqid=0 2024-11-23T13:23:22,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742379_1555 (size=13323) 2024-11-23T13:23:23,017 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/abd472a155c94656a1eaedc3308c1022 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/abd472a155c94656a1eaedc3308c1022 2024-11-23T13:23:23,020 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 827437fc31cfaf801e96764bfc0e4aaa/C of 827437fc31cfaf801e96764bfc0e4aaa into abd472a155c94656a1eaedc3308c1022(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T13:23:23,020 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 827437fc31cfaf801e96764bfc0e4aaa: 2024-11-23T13:23:23,020 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa., storeName=827437fc31cfaf801e96764bfc0e4aaa/C, priority=13, startTime=1732368202186; duration=0sec 2024-11-23T13:23:23,020 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:23:23,020 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 827437fc31cfaf801e96764bfc0e4aaa:C 2024-11-23T13:23:27,959 DEBUG [Thread-2060 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4414259d to 127.0.0.1:51875 2024-11-23T13:23:27,959 DEBUG [Thread-2060 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:23:27,991 DEBUG [Thread-2058 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0de9f076 to 127.0.0.1:51875 2024-11-23T13:23:27,991 DEBUG [Thread-2058 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:23:27,992 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-11-23T13:23:27,992 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 92 2024-11-23T13:23:27,992 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 81 2024-11-23T13:23:27,992 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 38 2024-11-23T13:23:27,992 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 30 2024-11-23T13:23:27,992 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 102 2024-11-23T13:23:27,992 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-23T13:23:27,992 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-23T13:23:27,992 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2888 2024-11-23T13:23:27,992 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8663 rows 2024-11-23T13:23:27,992 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2903 2024-11-23T13:23:27,992 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8709 rows 2024-11-23T13:23:27,992 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2893 2024-11-23T13:23:27,992 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8676 rows 2024-11-23T13:23:27,992 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2889 2024-11-23T13:23:27,992 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8667 rows 2024-11-23T13:23:27,992 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2892 2024-11-23T13:23:27,992 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8675 rows 2024-11-23T13:23:27,992 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-23T13:23:27,992 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4c60eb7d to 127.0.0.1:51875 2024-11-23T13:23:27,992 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 
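The writer/scanner summary just above comes from AcidGuaranteesTestTool, whose writers put a single value into every column of a row in one atomic mutation; a scanned row whose cells disagree would therefore indicate a torn write. A minimal sketch of that per-row invariant check against the public client API, assuming a running cluster and the TestAcidGuarantees table; this illustrates the invariant rather than reproducing the tool's own reader code:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellUtil;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RowAtomicityScan {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"));
             ResultScanner scanner = table.getScanner(new Scan())) {
          long scanned = 0, verified = 0;
          for (Result row : scanner) {
            scanned++;
            byte[] expected = null;
            for (Cell cell : row.rawCells()) {
              byte[] value = CellUtil.cloneValue(cell);
              if (expected == null) {
                expected = value;                 // first cell fixes the expected value for this row
              } else if (!Bytes.equals(expected, value)) {
                throw new AssertionError("Torn row: " + Bytes.toStringBinary(row.getRow()));
              }
              verified++;                         // count of cells checked, analogous to the tool's tally
            }
          }
          System.out.println("scanned " + scanned + " rows, verified " + verified + " cells");
        }
      }
    }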
2024-11-23T13:23:27,994 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-23T13:23:27,995 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-23T13:23:27,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=146, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-23T13:23:27,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-23T13:23:27,998 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732368207997"}]},"ts":"1732368207997"} 2024-11-23T13:23:27,999 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-23T13:23:28,001 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-23T13:23:28,002 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=147, ppid=146, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-23T13:23:28,003 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=148, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=827437fc31cfaf801e96764bfc0e4aaa, UNASSIGN}] 2024-11-23T13:23:28,004 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=148, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=827437fc31cfaf801e96764bfc0e4aaa, UNASSIGN 2024-11-23T13:23:28,005 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=148 updating hbase:meta row=827437fc31cfaf801e96764bfc0e4aaa, regionState=CLOSING, regionLocation=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:28,005 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T13:23:28,005 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=149, ppid=148, state=RUNNABLE; CloseRegionProcedure 827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317}] 2024-11-23T13:23:28,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-23T13:23:28,156 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:28,157 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] handler.UnassignRegionHandler(124): Close 827437fc31cfaf801e96764bfc0e4aaa 2024-11-23T13:23:28,157 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-23T13:23:28,157 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegion(1681): Closing 827437fc31cfaf801e96764bfc0e4aaa, disabling compactions & flushes 2024-11-23T13:23:28,157 INFO 
[RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:28,157 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:28,157 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. after waiting 0 ms 2024-11-23T13:23:28,157 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:28,157 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegion(2837): Flushing 827437fc31cfaf801e96764bfc0e4aaa 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-23T13:23:28,158 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=A 2024-11-23T13:23:28,158 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:28,158 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=B 2024-11-23T13:23:28,158 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:28,158 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 827437fc31cfaf801e96764bfc0e4aaa, store=C 2024-11-23T13:23:28,158 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:28,161 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/a8abca08f90f4abc81a4eeda40e01d7c is 50, key is test_row_0/A:col10/1732368207990/Put/seqid=0 2024-11-23T13:23:28,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742380_1556 (size=12301) 2024-11-23T13:23:28,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-23T13:23:28,565 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=440 (bloomFilter=true), 
to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/a8abca08f90f4abc81a4eeda40e01d7c 2024-11-23T13:23:28,570 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/bf25d5b749ec419cb1904968e3b7a85a is 50, key is test_row_0/B:col10/1732368207990/Put/seqid=0 2024-11-23T13:23:28,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742381_1557 (size=12301) 2024-11-23T13:23:28,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-23T13:23:28,974 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=440 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/bf25d5b749ec419cb1904968e3b7a85a 2024-11-23T13:23:28,979 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/cf15d60d0ca1426f8aeae9a95ab059de is 50, key is test_row_0/C:col10/1732368207990/Put/seqid=0 2024-11-23T13:23:28,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742382_1558 (size=12301) 2024-11-23T13:23:29,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-23T13:23:29,383 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=440 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/cf15d60d0ca1426f8aeae9a95ab059de 2024-11-23T13:23:29,386 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/A/a8abca08f90f4abc81a4eeda40e01d7c as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/a8abca08f90f4abc81a4eeda40e01d7c 2024-11-23T13:23:29,389 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/a8abca08f90f4abc81a4eeda40e01d7c, entries=150, sequenceid=440, filesize=12.0 K 2024-11-23T13:23:29,389 DEBUG 
[RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/B/bf25d5b749ec419cb1904968e3b7a85a as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/bf25d5b749ec419cb1904968e3b7a85a 2024-11-23T13:23:29,391 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/bf25d5b749ec419cb1904968e3b7a85a, entries=150, sequenceid=440, filesize=12.0 K 2024-11-23T13:23:29,392 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/.tmp/C/cf15d60d0ca1426f8aeae9a95ab059de as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/cf15d60d0ca1426f8aeae9a95ab059de 2024-11-23T13:23:29,394 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/cf15d60d0ca1426f8aeae9a95ab059de, entries=150, sequenceid=440, filesize=12.0 K 2024-11-23T13:23:29,395 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 827437fc31cfaf801e96764bfc0e4aaa in 1238ms, sequenceid=440, compaction requested=false 2024-11-23T13:23:29,395 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/fa5680fa82d0413081e008f0be6b7f2a, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/88f47dd7c04a4ec5b7e99df33e3a64a9, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/e38974f23f9f4dd2bed987c4ea2c924b, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/299ff02f8c164067acf94a25137fdacd, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/5350f64650b841a6b7825be7297ff335, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/b6e8149a3cb546af9780af0f8c95ab6f, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/ac3a29c15a2c4e228a023fc26ee925ca, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/d7c8957797c34b2380e93126d0392770, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/875b49bfa7ab46a8ae2e0b6c3b1ac59d, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/ee5dfd809b4242af8f5add0be7f73cba, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/f74439901a454d198f2e9da78ea26daa, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/b39e361ab7cc4a7a91a8dc6a77200525, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/c72646285de347939c3b9c2dea93147b, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/06f0d17a44a24c17ad36a8a9b50c3e00, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/150c3e85a2e94fefab18bfae8aa758e0, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/58ddeefcf7294d2bbaf50ca3d4fe7880, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/112d0215c24d4518ab0fd7745e6e180d, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/ff3e616141e9452a9cc26617909cc473, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/f94f0304a35a4974ab58b6b2d9d73772, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/56e7c20b856945b2aa2f99dd6ea76db4, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/1e64a02bdbbd4f27ad28248c58cd739a, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/7b0f05ae2e194b4d90c499765429a71f, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/874072aeac3142028a65d1c15d67cd7d, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/b78f0c2b37af4ac9bbeacdb7f2c30b6a, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/fd33f95ab6bd4d9b90ab3c38053eb702, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/b2c0bab5a79b467e81c8441502af26ae, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/b027f26cc6d840799007ea2bb4f29beb, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/04af2291779c4662a0d23ca9d6bba0c2, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/5056496822924c3985379e50d3f74af1, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/6b73af59fc1045b6875015bce6c4010d] to archive 2024-11-23T13:23:29,396 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-23T13:23:29,398 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/fa5680fa82d0413081e008f0be6b7f2a to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/fa5680fa82d0413081e008f0be6b7f2a 2024-11-23T13:23:29,398 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/88f47dd7c04a4ec5b7e99df33e3a64a9 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/88f47dd7c04a4ec5b7e99df33e3a64a9 2024-11-23T13:23:29,399 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/e38974f23f9f4dd2bed987c4ea2c924b to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/e38974f23f9f4dd2bed987c4ea2c924b 2024-11-23T13:23:29,400 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/299ff02f8c164067acf94a25137fdacd to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/299ff02f8c164067acf94a25137fdacd 2024-11-23T13:23:29,401 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/5350f64650b841a6b7825be7297ff335 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/5350f64650b841a6b7825be7297ff335 2024-11-23T13:23:29,402 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/b6e8149a3cb546af9780af0f8c95ab6f to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/b6e8149a3cb546af9780af0f8c95ab6f 2024-11-23T13:23:29,402 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/ac3a29c15a2c4e228a023fc26ee925ca to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/ac3a29c15a2c4e228a023fc26ee925ca 2024-11-23T13:23:29,403 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/d7c8957797c34b2380e93126d0392770 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/d7c8957797c34b2380e93126d0392770 2024-11-23T13:23:29,404 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/875b49bfa7ab46a8ae2e0b6c3b1ac59d to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/875b49bfa7ab46a8ae2e0b6c3b1ac59d 2024-11-23T13:23:29,405 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/ee5dfd809b4242af8f5add0be7f73cba to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/ee5dfd809b4242af8f5add0be7f73cba 2024-11-23T13:23:29,406 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/f74439901a454d198f2e9da78ea26daa to 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/f74439901a454d198f2e9da78ea26daa 2024-11-23T13:23:29,407 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/b39e361ab7cc4a7a91a8dc6a77200525 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/b39e361ab7cc4a7a91a8dc6a77200525 2024-11-23T13:23:29,407 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/c72646285de347939c3b9c2dea93147b to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/c72646285de347939c3b9c2dea93147b 2024-11-23T13:23:29,408 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/06f0d17a44a24c17ad36a8a9b50c3e00 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/06f0d17a44a24c17ad36a8a9b50c3e00 2024-11-23T13:23:29,409 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/150c3e85a2e94fefab18bfae8aa758e0 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/150c3e85a2e94fefab18bfae8aa758e0 2024-11-23T13:23:29,410 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/58ddeefcf7294d2bbaf50ca3d4fe7880 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/58ddeefcf7294d2bbaf50ca3d4fe7880 2024-11-23T13:23:29,411 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/112d0215c24d4518ab0fd7745e6e180d to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/112d0215c24d4518ab0fd7745e6e180d 2024-11-23T13:23:29,411 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/ff3e616141e9452a9cc26617909cc473 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/ff3e616141e9452a9cc26617909cc473 2024-11-23T13:23:29,412 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/f94f0304a35a4974ab58b6b2d9d73772 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/f94f0304a35a4974ab58b6b2d9d73772 2024-11-23T13:23:29,413 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/56e7c20b856945b2aa2f99dd6ea76db4 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/56e7c20b856945b2aa2f99dd6ea76db4 2024-11-23T13:23:29,414 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/1e64a02bdbbd4f27ad28248c58cd739a to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/1e64a02bdbbd4f27ad28248c58cd739a 2024-11-23T13:23:29,415 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/7b0f05ae2e194b4d90c499765429a71f to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/7b0f05ae2e194b4d90c499765429a71f 2024-11-23T13:23:29,415 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/874072aeac3142028a65d1c15d67cd7d to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/874072aeac3142028a65d1c15d67cd7d 2024-11-23T13:23:29,416 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/b78f0c2b37af4ac9bbeacdb7f2c30b6a to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/b78f0c2b37af4ac9bbeacdb7f2c30b6a 2024-11-23T13:23:29,417 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/fd33f95ab6bd4d9b90ab3c38053eb702 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/fd33f95ab6bd4d9b90ab3c38053eb702 2024-11-23T13:23:29,418 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/b2c0bab5a79b467e81c8441502af26ae to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/b2c0bab5a79b467e81c8441502af26ae 2024-11-23T13:23:29,419 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/b027f26cc6d840799007ea2bb4f29beb to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/b027f26cc6d840799007ea2bb4f29beb 2024-11-23T13:23:29,419 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/04af2291779c4662a0d23ca9d6bba0c2 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/04af2291779c4662a0d23ca9d6bba0c2 2024-11-23T13:23:29,420 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/5056496822924c3985379e50d3f74af1 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/5056496822924c3985379e50d3f74af1 2024-11-23T13:23:29,421 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/6b73af59fc1045b6875015bce6c4010d to 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/6b73af59fc1045b6875015bce6c4010d 2024-11-23T13:23:29,422 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/7ef9b4822ff546219a240387103adda6, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/a18330f2049846ad87eed87bbde2e5e7, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/2970864988cc4271893c7b0db8d8c2a2, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/0c657e70726c4a97a1fe104ebbeadf26, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/78ae4970317044aea983c00c9ca1af00, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/10a1bd9c7881434099789b76fea4269e, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/d5cf35d0dc994d8f9af92fd92002271f, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/81d22435ae4842a6b309fc3a689d367b, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/d744f5c586374b12bf7a9f78aa09e318, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/0354a71d76554b958542c8d144c22bb0, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/252e82bac0a74b70a4d8b123ef4f8425, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/b71a03a800db42dd9a2efe32315a52b8, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/c88ef7df1dd54579ae8d957c7632dcf1, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/460e11b7db6941fea2e7433449f6fe66, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/4906941c2ced43e78bb02048ae1b0716, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/119637f9585c40148fcfade6509e2f47, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/0180628cf4b24e849cabf575bb691943, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/9bdf994858f947ff91e878321337e176, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/fa7f2f31af88498fb4f456c2bac639d3, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/9ce1709ad32c481d8b38336c792602de, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/118332302296443287b3e360851b2370, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/9488d869813c473ebbb64e77cdc9d1d6, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/1b98fe69a2e5450cb82e0cf97c755259, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/1cabcc1fc1664cce8f77262a44df581c, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/f2ea5c2f01d548ddbdc72276f6a7ba35, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/406532d54fda43479d8ba891c23c88a0, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/5e8e33831cb640039480a98cfa8cbe18, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/9f37f214336740b7b14e703f40ae5354, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/17b2c9f4ecc1479dbe7b9f7d62e8fb87, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/a241e05ca6474c3884da99925e9fa6e9] to archive 2024-11-23T13:23:29,423 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
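The disable sequence traced above (HBaseAdmin "Started disable of TestAcidGuarantees", the DisableTableProcedure with pid=146, and the repeated MasterRpcServices "Checking to see if procedure is done" polls) corresponds to a single synchronous call on the public HBase client Admin API. Below is a minimal sketch of how such a call could be issued, assuming cluster settings are available on the classpath; the table name is taken from the log, while the connection setup and the pre-check are illustrative and not a reproduction of the actual test code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableExample {
    public static void main(String[] args) throws Exception {
        // Cluster settings (ZooKeeper quorum etc.) are read from hbase-site.xml on the classpath.
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestAcidGuarantees");

        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            if (!admin.isTableDisabled(table)) {
                // Blocks until the master's DisableTableProcedure finishes; the client
                // polls the master while waiting, which is consistent with the repeated
                // "Checking to see if procedure is done" lines in this log.
                admin.disableTable(table);
            }
        }
    }
}
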
2024-11-23T13:23:29,424 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/7ef9b4822ff546219a240387103adda6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/7ef9b4822ff546219a240387103adda6 2024-11-23T13:23:29,424 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/a18330f2049846ad87eed87bbde2e5e7 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/a18330f2049846ad87eed87bbde2e5e7 2024-11-23T13:23:29,425 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/2970864988cc4271893c7b0db8d8c2a2 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/2970864988cc4271893c7b0db8d8c2a2 2024-11-23T13:23:29,426 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/0c657e70726c4a97a1fe104ebbeadf26 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/0c657e70726c4a97a1fe104ebbeadf26 2024-11-23T13:23:29,427 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/78ae4970317044aea983c00c9ca1af00 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/78ae4970317044aea983c00c9ca1af00 2024-11-23T13:23:29,427 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/10a1bd9c7881434099789b76fea4269e to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/10a1bd9c7881434099789b76fea4269e 2024-11-23T13:23:29,428 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/d5cf35d0dc994d8f9af92fd92002271f to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/d5cf35d0dc994d8f9af92fd92002271f 2024-11-23T13:23:29,429 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/81d22435ae4842a6b309fc3a689d367b to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/81d22435ae4842a6b309fc3a689d367b 2024-11-23T13:23:29,430 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/d744f5c586374b12bf7a9f78aa09e318 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/d744f5c586374b12bf7a9f78aa09e318 2024-11-23T13:23:29,431 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/0354a71d76554b958542c8d144c22bb0 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/0354a71d76554b958542c8d144c22bb0 2024-11-23T13:23:29,431 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/252e82bac0a74b70a4d8b123ef4f8425 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/252e82bac0a74b70a4d8b123ef4f8425 2024-11-23T13:23:29,432 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/b71a03a800db42dd9a2efe32315a52b8 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/b71a03a800db42dd9a2efe32315a52b8 2024-11-23T13:23:29,433 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/c88ef7df1dd54579ae8d957c7632dcf1 to 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/c88ef7df1dd54579ae8d957c7632dcf1 2024-11-23T13:23:29,434 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/460e11b7db6941fea2e7433449f6fe66 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/460e11b7db6941fea2e7433449f6fe66 2024-11-23T13:23:29,434 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/4906941c2ced43e78bb02048ae1b0716 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/4906941c2ced43e78bb02048ae1b0716 2024-11-23T13:23:29,435 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/119637f9585c40148fcfade6509e2f47 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/119637f9585c40148fcfade6509e2f47 2024-11-23T13:23:29,436 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/0180628cf4b24e849cabf575bb691943 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/0180628cf4b24e849cabf575bb691943 2024-11-23T13:23:29,437 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/9bdf994858f947ff91e878321337e176 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/9bdf994858f947ff91e878321337e176 2024-11-23T13:23:29,437 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/fa7f2f31af88498fb4f456c2bac639d3 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/fa7f2f31af88498fb4f456c2bac639d3 2024-11-23T13:23:29,438 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/9ce1709ad32c481d8b38336c792602de to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/9ce1709ad32c481d8b38336c792602de 2024-11-23T13:23:29,439 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/118332302296443287b3e360851b2370 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/118332302296443287b3e360851b2370 2024-11-23T13:23:29,439 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/9488d869813c473ebbb64e77cdc9d1d6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/9488d869813c473ebbb64e77cdc9d1d6 2024-11-23T13:23:29,440 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/1b98fe69a2e5450cb82e0cf97c755259 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/1b98fe69a2e5450cb82e0cf97c755259 2024-11-23T13:23:29,441 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/1cabcc1fc1664cce8f77262a44df581c to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/1cabcc1fc1664cce8f77262a44df581c 2024-11-23T13:23:29,442 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/f2ea5c2f01d548ddbdc72276f6a7ba35 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/f2ea5c2f01d548ddbdc72276f6a7ba35 2024-11-23T13:23:29,442 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/406532d54fda43479d8ba891c23c88a0 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/406532d54fda43479d8ba891c23c88a0 2024-11-23T13:23:29,443 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/5e8e33831cb640039480a98cfa8cbe18 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/5e8e33831cb640039480a98cfa8cbe18 2024-11-23T13:23:29,444 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/9f37f214336740b7b14e703f40ae5354 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/9f37f214336740b7b14e703f40ae5354 2024-11-23T13:23:29,445 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/17b2c9f4ecc1479dbe7b9f7d62e8fb87 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/17b2c9f4ecc1479dbe7b9f7d62e8fb87 2024-11-23T13:23:29,446 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/a241e05ca6474c3884da99925e9fa6e9 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/a241e05ca6474c3884da99925e9fa6e9 2024-11-23T13:23:29,447 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/b6012cf0e5f14089931661511964c5b1, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/f7bf81d67fd7442491e2067c3e2c8e37, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/40717ad9ae954b84967aa864ddf90ec3, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/825a63d42f984b6983726adc34139239, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/afdf06a3c44540919a77fcdc9a892460, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/cfb146faae62466fba0ce9805fc7015d, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/0c69b898443e4bb2a4ae3b3f1d00c96d, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/7b0d6c8fe173415f827d82785c33cf46, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/803adf32adc54b1f9941cadcb7b2b8b6, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/705d74db8a274a0c9e6190a0ee1282de, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/f55fc349dfc14c61ac672f36300b4c5e, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/3e71ef376ec64a3e90bac6675fbff557, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/a78b5b6a444e4638b1cc7a453e91e480, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/5b8c39f707da49ff93e6ee116f8d44fc, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/984aa338b6c24476b7e1b9c456060b2f, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/b8cfb9d04eaa44f9b503ba11ef202aa9, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/f3c819a9e8dd45df8e578e04984ff990, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/0f0c03bf1b1b47fda6c7435b061e464f, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/090a15293df74a0ba746310de88def57, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/49286b299f6b4704bb5951a02c94934c, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/92dde3d1dd7b4ccab97ce733f030301e, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/8c51fe8532844a63a1fcba42ee745590, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/23c9ec6ecfa040a692b6158c85eeb257, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/d061da3654ed43f8b98480649807bfac, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/7cf95e61dbc54fe4957955868d391fcd, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/7753fb79a5464667a16a872b20b80e64, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/1be67d4d29af49d9958bf60b1a051669, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/78e779f613b043bcafee0e67c94f7836, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/ec61ad8eee394b1ea9ced9142aeccbc1, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/5210021fafd44e5dae735c8b5413031f] to archive 2024-11-23T13:23:29,447 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-23T13:23:29,448 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/b6012cf0e5f14089931661511964c5b1 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/b6012cf0e5f14089931661511964c5b1 2024-11-23T13:23:29,449 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/f7bf81d67fd7442491e2067c3e2c8e37 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/f7bf81d67fd7442491e2067c3e2c8e37 2024-11-23T13:23:29,450 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/40717ad9ae954b84967aa864ddf90ec3 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/40717ad9ae954b84967aa864ddf90ec3 2024-11-23T13:23:29,451 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/825a63d42f984b6983726adc34139239 to 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/825a63d42f984b6983726adc34139239 2024-11-23T13:23:29,451 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/afdf06a3c44540919a77fcdc9a892460 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/afdf06a3c44540919a77fcdc9a892460 2024-11-23T13:23:29,452 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/cfb146faae62466fba0ce9805fc7015d to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/cfb146faae62466fba0ce9805fc7015d 2024-11-23T13:23:29,453 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/0c69b898443e4bb2a4ae3b3f1d00c96d to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/0c69b898443e4bb2a4ae3b3f1d00c96d 2024-11-23T13:23:29,453 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/7b0d6c8fe173415f827d82785c33cf46 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/7b0d6c8fe173415f827d82785c33cf46 2024-11-23T13:23:29,454 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/803adf32adc54b1f9941cadcb7b2b8b6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/803adf32adc54b1f9941cadcb7b2b8b6 2024-11-23T13:23:29,455 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/705d74db8a274a0c9e6190a0ee1282de to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/705d74db8a274a0c9e6190a0ee1282de 2024-11-23T13:23:29,456 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/f55fc349dfc14c61ac672f36300b4c5e to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/f55fc349dfc14c61ac672f36300b4c5e 2024-11-23T13:23:29,457 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/3e71ef376ec64a3e90bac6675fbff557 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/3e71ef376ec64a3e90bac6675fbff557 2024-11-23T13:23:29,457 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/a78b5b6a444e4638b1cc7a453e91e480 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/a78b5b6a444e4638b1cc7a453e91e480 2024-11-23T13:23:29,458 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/5b8c39f707da49ff93e6ee116f8d44fc to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/5b8c39f707da49ff93e6ee116f8d44fc 2024-11-23T13:23:29,459 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/984aa338b6c24476b7e1b9c456060b2f to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/984aa338b6c24476b7e1b9c456060b2f 2024-11-23T13:23:29,460 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/b8cfb9d04eaa44f9b503ba11ef202aa9 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/b8cfb9d04eaa44f9b503ba11ef202aa9 2024-11-23T13:23:29,460 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/f3c819a9e8dd45df8e578e04984ff990 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/f3c819a9e8dd45df8e578e04984ff990 2024-11-23T13:23:29,461 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/0f0c03bf1b1b47fda6c7435b061e464f to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/0f0c03bf1b1b47fda6c7435b061e464f 2024-11-23T13:23:29,462 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/090a15293df74a0ba746310de88def57 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/090a15293df74a0ba746310de88def57 2024-11-23T13:23:29,463 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/49286b299f6b4704bb5951a02c94934c to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/49286b299f6b4704bb5951a02c94934c 2024-11-23T13:23:29,464 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/92dde3d1dd7b4ccab97ce733f030301e to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/92dde3d1dd7b4ccab97ce733f030301e 2024-11-23T13:23:29,464 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/8c51fe8532844a63a1fcba42ee745590 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/8c51fe8532844a63a1fcba42ee745590 2024-11-23T13:23:29,465 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/23c9ec6ecfa040a692b6158c85eeb257 to 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/23c9ec6ecfa040a692b6158c85eeb257 2024-11-23T13:23:29,466 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/d061da3654ed43f8b98480649807bfac to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/d061da3654ed43f8b98480649807bfac 2024-11-23T13:23:29,467 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/7cf95e61dbc54fe4957955868d391fcd to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/7cf95e61dbc54fe4957955868d391fcd 2024-11-23T13:23:29,468 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/7753fb79a5464667a16a872b20b80e64 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/7753fb79a5464667a16a872b20b80e64 2024-11-23T13:23:29,469 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/1be67d4d29af49d9958bf60b1a051669 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/1be67d4d29af49d9958bf60b1a051669 2024-11-23T13:23:29,470 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/78e779f613b043bcafee0e67c94f7836 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/78e779f613b043bcafee0e67c94f7836 2024-11-23T13:23:29,471 DEBUG [StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/ec61ad8eee394b1ea9ced9142aeccbc1 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/ec61ad8eee394b1ea9ced9142aeccbc1 2024-11-23T13:23:29,472 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/5210021fafd44e5dae735c8b5413031f to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/5210021fafd44e5dae735c8b5413031f 2024-11-23T13:23:29,475 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/recovered.edits/443.seqid, newMaxSeqId=443, maxSeqId=1 2024-11-23T13:23:29,475 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa. 2024-11-23T13:23:29,475 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegion(1635): Region close journal for 827437fc31cfaf801e96764bfc0e4aaa: 2024-11-23T13:23:29,477 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] handler.UnassignRegionHandler(170): Closed 827437fc31cfaf801e96764bfc0e4aaa 2024-11-23T13:23:29,477 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=148 updating hbase:meta row=827437fc31cfaf801e96764bfc0e4aaa, regionState=CLOSED 2024-11-23T13:23:29,479 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=149, resume processing ppid=148 2024-11-23T13:23:29,479 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, ppid=148, state=SUCCESS; CloseRegionProcedure 827437fc31cfaf801e96764bfc0e4aaa, server=ba2e440802a7,33173,1732368061317 in 1.4730 sec 2024-11-23T13:23:29,480 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=148, resume processing ppid=147 2024-11-23T13:23:29,480 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, ppid=147, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=827437fc31cfaf801e96764bfc0e4aaa, UNASSIGN in 1.4760 sec 2024-11-23T13:23:29,481 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=147, resume processing ppid=146 2024-11-23T13:23:29,481 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, ppid=146, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.4780 sec 2024-11-23T13:23:29,482 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732368209482"}]},"ts":"1732368209482"} 2024-11-23T13:23:29,482 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-23T13:23:29,485 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-23T13:23:29,486 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=146, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.4900 sec 2024-11-23T13:23:29,720 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): 
NoSuchFieldException: threadGroup; this may be because your Hadoop version is > 3.2.3 or 3.3.4; see HBASE-27595 for details. 2024-11-23T13:23:30,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-23T13:23:30,101 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 146 completed 2024-11-23T13:23:30,101 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-23T13:23:30,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=150, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T13:23:30,103 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=150, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T13:23:30,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-23T13:23:30,103 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=150, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T13:23:30,104 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa 2024-11-23T13:23:30,106 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A, FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B, FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C, FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/recovered.edits] 2024-11-23T13:23:30,108 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/3d9b11d836be4a3d91870a6c8064f0f8 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/3d9b11d836be4a3d91870a6c8064f0f8 2024-11-23T13:23:30,108 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/a8abca08f90f4abc81a4eeda40e01d7c to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/A/a8abca08f90f4abc81a4eeda40e01d7c 2024-11-23T13:23:30,110 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/9a786c9154434c52bf18e3b60a2e3832 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/9a786c9154434c52bf18e3b60a2e3832 2024-11-23T13:23:30,111 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/bf25d5b749ec419cb1904968e3b7a85a to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/B/bf25d5b749ec419cb1904968e3b7a85a 2024-11-23T13:23:30,112 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/abd472a155c94656a1eaedc3308c1022 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/abd472a155c94656a1eaedc3308c1022 2024-11-23T13:23:30,113 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/cf15d60d0ca1426f8aeae9a95ab059de to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/C/cf15d60d0ca1426f8aeae9a95ab059de 2024-11-23T13:23:30,115 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/recovered.edits/443.seqid to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa/recovered.edits/443.seqid 2024-11-23T13:23:30,115 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/827437fc31cfaf801e96764bfc0e4aaa 2024-11-23T13:23:30,115 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-23T13:23:30,117 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=150, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T13:23:30,118 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-23T13:23:30,119 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-23T13:23:30,120 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=150, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T13:23:30,120 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 
2024-11-23T13:23:30,120 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732368210120"}]},"ts":"9223372036854775807"} 2024-11-23T13:23:30,121 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-23T13:23:30,121 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 827437fc31cfaf801e96764bfc0e4aaa, NAME => 'TestAcidGuarantees,,1732368178676.827437fc31cfaf801e96764bfc0e4aaa.', STARTKEY => '', ENDKEY => ''}] 2024-11-23T13:23:30,122 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-23T13:23:30,122 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732368210122"}]},"ts":"9223372036854775807"} 2024-11-23T13:23:30,123 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-23T13:23:30,125 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=150, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T13:23:30,126 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 24 msec 2024-11-23T13:23:30,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-23T13:23:30,204 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 150 completed 2024-11-23T13:23:30,213 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testScanAtomicity Thread=237 (was 241), OpenFileDescriptor=451 (was 459), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=277 (was 309), ProcessCount=11 (was 11), AvailableMemoryMB=3644 (was 3654) 2024-11-23T13:23:30,221 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMobGetAtomicity Thread=237, OpenFileDescriptor=451, MaxFileDescriptor=1048576, SystemLoadAverage=277, ProcessCount=11, AvailableMemoryMB=3644 2024-11-23T13:23:30,222 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-11-23T13:23:30,222 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T13:23:30,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=151, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-23T13:23:30,224 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=151, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T13:23:30,224 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:30,224 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 151 2024-11-23T13:23:30,224 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=151, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T13:23:30,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-11-23T13:23:30,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742383_1559 (size=960) 2024-11-23T13:23:30,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-11-23T13:23:30,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-11-23T13:23:30,631 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7 2024-11-23T13:23:30,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742384_1560 (size=53) 2024-11-23T13:23:30,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-11-23T13:23:31,036 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T13:23:31,036 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 3fb74ed11ce3b976f22e2e146ef6eea6, disabling compactions & flushes 2024-11-23T13:23:31,036 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 2024-11-23T13:23:31,036 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 2024-11-23T13:23:31,036 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. after waiting 0 ms 2024-11-23T13:23:31,036 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 2024-11-23T13:23:31,036 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 
2024-11-23T13:23:31,036 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 3fb74ed11ce3b976f22e2e146ef6eea6: 2024-11-23T13:23:31,037 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=151, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T13:23:31,037 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732368211037"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732368211037"}]},"ts":"1732368211037"} 2024-11-23T13:23:31,038 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-23T13:23:31,039 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=151, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T13:23:31,039 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732368211039"}]},"ts":"1732368211039"} 2024-11-23T13:23:31,039 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-23T13:23:31,045 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3fb74ed11ce3b976f22e2e146ef6eea6, ASSIGN}] 2024-11-23T13:23:31,046 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3fb74ed11ce3b976f22e2e146ef6eea6, ASSIGN 2024-11-23T13:23:31,046 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=3fb74ed11ce3b976f22e2e146ef6eea6, ASSIGN; state=OFFLINE, location=ba2e440802a7,33173,1732368061317; forceNewPlan=false, retain=false 2024-11-23T13:23:31,074 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-23T13:23:31,197 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=152 updating hbase:meta row=3fb74ed11ce3b976f22e2e146ef6eea6, regionState=OPENING, regionLocation=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:31,199 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=153, ppid=152, state=RUNNABLE; OpenRegionProcedure 3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317}] 2024-11-23T13:23:31,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-11-23T13:23:31,351 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:31,354 INFO [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] 
handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 2024-11-23T13:23:31,354 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] regionserver.HRegion(7285): Opening region: {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} 2024-11-23T13:23:31,354 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:31,354 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T13:23:31,354 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] regionserver.HRegion(7327): checking encryption for 3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:31,354 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] regionserver.HRegion(7330): checking classloading for 3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:31,355 INFO [StoreOpener-3fb74ed11ce3b976f22e2e146ef6eea6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:31,356 INFO [StoreOpener-3fb74ed11ce3b976f22e2e146ef6eea6-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-23T13:23:31,356 INFO [StoreOpener-3fb74ed11ce3b976f22e2e146ef6eea6-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3fb74ed11ce3b976f22e2e146ef6eea6 columnFamilyName A 2024-11-23T13:23:31,357 DEBUG [StoreOpener-3fb74ed11ce3b976f22e2e146ef6eea6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:31,357 INFO [StoreOpener-3fb74ed11ce3b976f22e2e146ef6eea6-1 {}] regionserver.HStore(327): Store=3fb74ed11ce3b976f22e2e146ef6eea6/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T13:23:31,357 INFO [StoreOpener-3fb74ed11ce3b976f22e2e146ef6eea6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, 
cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:31,358 INFO [StoreOpener-3fb74ed11ce3b976f22e2e146ef6eea6-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-23T13:23:31,358 INFO [StoreOpener-3fb74ed11ce3b976f22e2e146ef6eea6-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3fb74ed11ce3b976f22e2e146ef6eea6 columnFamilyName B 2024-11-23T13:23:31,358 DEBUG [StoreOpener-3fb74ed11ce3b976f22e2e146ef6eea6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:31,358 INFO [StoreOpener-3fb74ed11ce3b976f22e2e146ef6eea6-1 {}] regionserver.HStore(327): Store=3fb74ed11ce3b976f22e2e146ef6eea6/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T13:23:31,358 INFO [StoreOpener-3fb74ed11ce3b976f22e2e146ef6eea6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:31,359 INFO [StoreOpener-3fb74ed11ce3b976f22e2e146ef6eea6-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-23T13:23:31,359 INFO [StoreOpener-3fb74ed11ce3b976f22e2e146ef6eea6-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3fb74ed11ce3b976f22e2e146ef6eea6 columnFamilyName C 2024-11-23T13:23:31,359 DEBUG [StoreOpener-3fb74ed11ce3b976f22e2e146ef6eea6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:31,359 INFO [StoreOpener-3fb74ed11ce3b976f22e2e146ef6eea6-1 
{}] regionserver.HStore(327): Store=3fb74ed11ce3b976f22e2e146ef6eea6/C, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T13:23:31,359 INFO [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 2024-11-23T13:23:31,360 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:31,360 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:31,361 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-23T13:23:31,362 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] regionserver.HRegion(1085): writing seq id for 3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:31,364 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T13:23:31,364 INFO [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] regionserver.HRegion(1102): Opened 3fb74ed11ce3b976f22e2e146ef6eea6; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74084521, jitterRate=0.10394538938999176}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T13:23:31,364 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] regionserver.HRegion(1001): Region open journal for 3fb74ed11ce3b976f22e2e146ef6eea6: 2024-11-23T13:23:31,365 INFO [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6., pid=153, masterSystemTime=1732368211351 2024-11-23T13:23:31,366 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 2024-11-23T13:23:31,366 INFO [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 
2024-11-23T13:23:31,366 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=152 updating hbase:meta row=3fb74ed11ce3b976f22e2e146ef6eea6, regionState=OPEN, openSeqNum=2, regionLocation=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:31,368 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=153, resume processing ppid=152 2024-11-23T13:23:31,368 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, ppid=152, state=SUCCESS; OpenRegionProcedure 3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 in 168 msec 2024-11-23T13:23:31,369 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=152, resume processing ppid=151 2024-11-23T13:23:31,369 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, ppid=151, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=3fb74ed11ce3b976f22e2e146ef6eea6, ASSIGN in 323 msec 2024-11-23T13:23:31,370 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=151, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T13:23:31,370 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732368211370"}]},"ts":"1732368211370"} 2024-11-23T13:23:31,370 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-23T13:23:31,372 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=151, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T13:23:31,373 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=151, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1510 sec 2024-11-23T13:23:32,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-11-23T13:23:32,328 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 151 completed 2024-11-23T13:23:32,329 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6d9954b7 to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3fb684eb 2024-11-23T13:23:32,336 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@537a66f8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:23:32,337 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:23:32,338 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43610, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:23:32,339 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-23T13:23:32,340 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57524, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-23T13:23:32,341 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-23T13:23:32,341 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T13:23:32,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=154, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-23T13:23:32,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742385_1561 (size=996) 2024-11-23T13:23:32,751 DEBUG [PEWorker-3 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.960 2024-11-23T13:23:32,751 INFO [PEWorker-3 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.996 2024-11-23T13:23:32,752 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=155, ppid=154, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-23T13:23:32,754 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=156, ppid=155, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3fb74ed11ce3b976f22e2e146ef6eea6, REOPEN/MOVE}] 2024-11-23T13:23:32,754 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=156, ppid=155, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3fb74ed11ce3b976f22e2e146ef6eea6, REOPEN/MOVE 2024-11-23T13:23:32,755 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=156 updating hbase:meta row=3fb74ed11ce3b976f22e2e146ef6eea6, regionState=CLOSING, regionLocation=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:32,756 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T13:23:32,756 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=157, ppid=156, state=RUNNABLE; CloseRegionProcedure 3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317}] 2024-11-23T13:23:32,907 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:32,907 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] handler.UnassignRegionHandler(124): Close 3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:32,907 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-23T13:23:32,907 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegion(1681): Closing 3fb74ed11ce3b976f22e2e146ef6eea6, disabling compactions & flushes 2024-11-23T13:23:32,908 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 2024-11-23T13:23:32,908 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 2024-11-23T13:23:32,908 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. after waiting 0 ms 2024-11-23T13:23:32,908 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 
2024-11-23T13:23:32,911 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-23T13:23:32,911 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 2024-11-23T13:23:32,911 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegion(1635): Region close journal for 3fb74ed11ce3b976f22e2e146ef6eea6: 2024-11-23T13:23:32,911 WARN [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegionServer(3786): Not adding moved region record: 3fb74ed11ce3b976f22e2e146ef6eea6 to self. 2024-11-23T13:23:32,913 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] handler.UnassignRegionHandler(170): Closed 3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:32,913 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=156 updating hbase:meta row=3fb74ed11ce3b976f22e2e146ef6eea6, regionState=CLOSED 2024-11-23T13:23:32,915 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=157, resume processing ppid=156 2024-11-23T13:23:32,915 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, ppid=156, state=SUCCESS; CloseRegionProcedure 3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 in 158 msec 2024-11-23T13:23:32,915 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=156, ppid=155, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=3fb74ed11ce3b976f22e2e146ef6eea6, REOPEN/MOVE; state=CLOSED, location=ba2e440802a7,33173,1732368061317; forceNewPlan=false, retain=true 2024-11-23T13:23:33,065 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=156 updating hbase:meta row=3fb74ed11ce3b976f22e2e146ef6eea6, regionState=OPENING, regionLocation=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:33,066 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=158, ppid=156, state=RUNNABLE; OpenRegionProcedure 3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317}] 2024-11-23T13:23:33,218 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:33,220 INFO [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 
2024-11-23T13:23:33,220 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] regionserver.HRegion(7285): Opening region: {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} 2024-11-23T13:23:33,221 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:33,221 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T13:23:33,221 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] regionserver.HRegion(7327): checking encryption for 3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:33,221 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] regionserver.HRegion(7330): checking classloading for 3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:33,222 INFO [StoreOpener-3fb74ed11ce3b976f22e2e146ef6eea6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:33,223 INFO [StoreOpener-3fb74ed11ce3b976f22e2e146ef6eea6-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-23T13:23:33,223 INFO [StoreOpener-3fb74ed11ce3b976f22e2e146ef6eea6-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3fb74ed11ce3b976f22e2e146ef6eea6 columnFamilyName A 2024-11-23T13:23:33,224 DEBUG [StoreOpener-3fb74ed11ce3b976f22e2e146ef6eea6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:33,224 INFO [StoreOpener-3fb74ed11ce3b976f22e2e146ef6eea6-1 {}] regionserver.HStore(327): Store=3fb74ed11ce3b976f22e2e146ef6eea6/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T13:23:33,224 INFO [StoreOpener-3fb74ed11ce3b976f22e2e146ef6eea6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:33,225 INFO [StoreOpener-3fb74ed11ce3b976f22e2e146ef6eea6-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-23T13:23:33,225 INFO [StoreOpener-3fb74ed11ce3b976f22e2e146ef6eea6-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3fb74ed11ce3b976f22e2e146ef6eea6 columnFamilyName B 2024-11-23T13:23:33,225 DEBUG [StoreOpener-3fb74ed11ce3b976f22e2e146ef6eea6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:33,225 INFO [StoreOpener-3fb74ed11ce3b976f22e2e146ef6eea6-1 {}] regionserver.HStore(327): Store=3fb74ed11ce3b976f22e2e146ef6eea6/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T13:23:33,225 INFO [StoreOpener-3fb74ed11ce3b976f22e2e146ef6eea6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:33,226 INFO [StoreOpener-3fb74ed11ce3b976f22e2e146ef6eea6-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-23T13:23:33,226 INFO [StoreOpener-3fb74ed11ce3b976f22e2e146ef6eea6-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3fb74ed11ce3b976f22e2e146ef6eea6 columnFamilyName C 2024-11-23T13:23:33,226 DEBUG [StoreOpener-3fb74ed11ce3b976f22e2e146ef6eea6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:33,226 INFO [StoreOpener-3fb74ed11ce3b976f22e2e146ef6eea6-1 {}] regionserver.HStore(327): Store=3fb74ed11ce3b976f22e2e146ef6eea6/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T13:23:33,226 INFO [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 2024-11-23T13:23:33,227 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:33,227 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:33,228 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-23T13:23:33,229 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] regionserver.HRegion(1085): writing seq id for 3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:33,230 INFO [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] regionserver.HRegion(1102): Opened 3fb74ed11ce3b976f22e2e146ef6eea6; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68885655, jitterRate=0.02647624909877777}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-23T13:23:33,230 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] regionserver.HRegion(1001): Region open journal for 3fb74ed11ce3b976f22e2e146ef6eea6: 2024-11-23T13:23:33,231 INFO [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6., pid=158, masterSystemTime=1732368213218 2024-11-23T13:23:33,232 DEBUG [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 2024-11-23T13:23:33,232 INFO [RS_OPEN_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 
2024-11-23T13:23:33,232 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=156 updating hbase:meta row=3fb74ed11ce3b976f22e2e146ef6eea6, regionState=OPEN, openSeqNum=5, regionLocation=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:33,234 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=158, resume processing ppid=156 2024-11-23T13:23:33,234 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=158, ppid=156, state=SUCCESS; OpenRegionProcedure 3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 in 167 msec 2024-11-23T13:23:33,235 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=156, resume processing ppid=155 2024-11-23T13:23:33,235 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, ppid=155, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=3fb74ed11ce3b976f22e2e146ef6eea6, REOPEN/MOVE in 480 msec 2024-11-23T13:23:33,237 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=155, resume processing ppid=154 2024-11-23T13:23:33,237 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, ppid=154, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 484 msec 2024-11-23T13:23:33,238 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 896 msec 2024-11-23T13:23:33,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-11-23T13:23:33,240 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0d5efb7a to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@644b7e6 2024-11-23T13:23:33,243 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6094c70, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:23:33,245 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7fc332d8 to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5c9b5141 2024-11-23T13:23:33,247 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@103dfc6e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:23:33,248 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x17327621 to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@11a52cdf 2024-11-23T13:23:33,251 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e047c09, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:23:33,253 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1584f18a 
to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2d7fe431 2024-11-23T13:23:33,255 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@60d631a3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:23:33,256 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5b914bf4 to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@91d72db 2024-11-23T13:23:33,259 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58971172, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:23:33,260 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5d836f78 to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3d7fe93b 2024-11-23T13:23:33,264 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7846cb78, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:23:33,265 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x53305d9b to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@11c440f7 2024-11-23T13:23:33,268 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f1754bc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:23:33,269 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6bb6288a to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@58460ef3 2024-11-23T13:23:33,272 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d9113f3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:23:33,273 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x06556601 to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6e8cd1ae 2024-11-23T13:23:33,277 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5bb75907, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:23:33,277 DEBUG [Time-limited test 
{}] zookeeper.ReadOnlyZKClient(149): Connect 0x458a85fd to 127.0.0.1:51875 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4d832d43 2024-11-23T13:23:33,281 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c1d3a95, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T13:23:33,286 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T13:23:33,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=159, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees 2024-11-23T13:23:33,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-23T13:23:33,287 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=159, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T13:23:33,287 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=159, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T13:23:33,287 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=160, ppid=159, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T13:23:33,288 DEBUG [hconnection-0x125409cc-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:23:33,289 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39306, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:23:33,292 DEBUG [hconnection-0x19d080d7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:23:33,292 DEBUG [hconnection-0x70eb0531-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:23:33,293 DEBUG [hconnection-0x59e02ba0-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:23:33,293 DEBUG [hconnection-0x13a67364-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:23:33,293 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39316, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:23:33,293 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39328, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:23:33,293 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39320, version=2.7.0-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:23:33,293 DEBUG [hconnection-0x3bdf662e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:23:33,294 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39330, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:23:33,294 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39336, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:23:33,297 DEBUG [hconnection-0x1467f15-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:23:33,297 DEBUG [hconnection-0x2a3faba5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:23:33,297 DEBUG [hconnection-0x12f30586-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:23:33,298 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39342, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:23:33,298 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39352, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:23:33,298 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39362, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:23:33,298 DEBUG [hconnection-0x2f821463-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T13:23:33,299 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39372, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T13:23:33,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:33,302 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3fb74ed11ce3b976f22e2e146ef6eea6 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-23T13:23:33,302 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3fb74ed11ce3b976f22e2e146ef6eea6, store=A 2024-11-23T13:23:33,302 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:33,303 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3fb74ed11ce3b976f22e2e146ef6eea6, store=B 2024-11-23T13:23:33,303 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:33,303 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3fb74ed11ce3b976f22e2e146ef6eea6, store=C 2024-11-23T13:23:33,303 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:33,316 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding 
memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:33,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39330 deadline: 1732368273313, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:33,316 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:33,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39352 deadline: 1732368273314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:33,317 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:33,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39342 deadline: 1732368273314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:33,318 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:33,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39316 deadline: 1732368273316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:33,318 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:33,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39372 deadline: 1732368273316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:33,329 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112371d5f47e178444a2a45ad7aee55b4755_3fb74ed11ce3b976f22e2e146ef6eea6 is 50, key is test_row_0/A:col10/1732368213301/Put/seqid=0 2024-11-23T13:23:33,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742386_1562 (size=12154) 2024-11-23T13:23:33,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-23T13:23:33,418 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:33,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39330 deadline: 1732368273417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:33,419 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:33,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39352 deadline: 1732368273417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:33,419 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:33,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39342 deadline: 1732368273418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:33,420 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:33,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39316 deadline: 1732368273419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:33,420 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:33,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39372 deadline: 1732368273419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:33,438 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:33,439 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-23T13:23:33,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 2024-11-23T13:23:33,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. as already flushing 2024-11-23T13:23:33,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 2024-11-23T13:23:33,439 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:33,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:33,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:33,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-23T13:23:33,591 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:33,591 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-23T13:23:33,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 2024-11-23T13:23:33,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. as already flushing 2024-11-23T13:23:33,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 2024-11-23T13:23:33,592 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:33,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:33,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:33,621 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:33,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39352 deadline: 1732368273620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:33,622 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:33,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39330 deadline: 1732368273620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:33,622 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:33,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39342 deadline: 1732368273621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:33,622 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:33,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39372 deadline: 1732368273621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:33,623 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:33,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39316 deadline: 1732368273622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:33,744 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:33,745 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-23T13:23:33,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 2024-11-23T13:23:33,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. as already flushing 2024-11-23T13:23:33,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 2024-11-23T13:23:33,745 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:33,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:33,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:33,747 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:33,755 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112371d5f47e178444a2a45ad7aee55b4755_3fb74ed11ce3b976f22e2e146ef6eea6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112371d5f47e178444a2a45ad7aee55b4755_3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:33,756 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/bfe50b0ee6a1476faed25a9ecb843ae3, store: [table=TestAcidGuarantees family=A region=3fb74ed11ce3b976f22e2e146ef6eea6] 2024-11-23T13:23:33,756 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/bfe50b0ee6a1476faed25a9ecb843ae3 is 175, key is test_row_0/A:col10/1732368213301/Put/seqid=0 2024-11-23T13:23:33,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742387_1563 (size=30955) 2024-11-23T13:23:33,760 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=18, memsize=24.6 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/bfe50b0ee6a1476faed25a9ecb843ae3 2024-11-23T13:23:33,781 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/B/bcfa7ff5f6c048b5890acb0e3adf047b is 50, key is test_row_0/B:col10/1732368213301/Put/seqid=0 2024-11-23T13:23:33,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742388_1564 (size=12001) 2024-11-23T13:23:33,785 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/B/bcfa7ff5f6c048b5890acb0e3adf047b 2024-11-23T13:23:33,805 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/C/825933bcdecd461d87664a17a0fa7d77 is 50, key is test_row_0/C:col10/1732368213301/Put/seqid=0 2024-11-23T13:23:33,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742389_1565 (size=12001) 2024-11-23T13:23:33,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-23T13:23:33,897 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:33,897 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-23T13:23:33,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 2024-11-23T13:23:33,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. as already flushing 2024-11-23T13:23:33,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 2024-11-23T13:23:33,898 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:33,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:33,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:33,923 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:33,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39330 deadline: 1732368273923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:33,924 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:33,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39372 deadline: 1732368273923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:33,924 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:33,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39352 deadline: 1732368273923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:33,925 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:33,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39342 deadline: 1732368273924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:33,926 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:33,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39316 deadline: 1732368273926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:34,050 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:34,050 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-23T13:23:34,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 2024-11-23T13:23:34,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. as already flushing 2024-11-23T13:23:34,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 2024-11-23T13:23:34,050 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:34,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:34,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:34,202 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:34,203 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-23T13:23:34,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 2024-11-23T13:23:34,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. as already flushing 2024-11-23T13:23:34,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 2024-11-23T13:23:34,203 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:34,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:34,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:34,210 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/C/825933bcdecd461d87664a17a0fa7d77 2024-11-23T13:23:34,214 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/bfe50b0ee6a1476faed25a9ecb843ae3 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/bfe50b0ee6a1476faed25a9ecb843ae3 2024-11-23T13:23:34,217 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/bfe50b0ee6a1476faed25a9ecb843ae3, entries=150, sequenceid=18, filesize=30.2 K 2024-11-23T13:23:34,217 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/B/bcfa7ff5f6c048b5890acb0e3adf047b as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/bcfa7ff5f6c048b5890acb0e3adf047b 2024-11-23T13:23:34,220 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/bcfa7ff5f6c048b5890acb0e3adf047b, entries=150, sequenceid=18, 
filesize=11.7 K 2024-11-23T13:23:34,221 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/C/825933bcdecd461d87664a17a0fa7d77 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/825933bcdecd461d87664a17a0fa7d77 2024-11-23T13:23:34,224 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/825933bcdecd461d87664a17a0fa7d77, entries=150, sequenceid=18, filesize=11.7 K 2024-11-23T13:23:34,225 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 3fb74ed11ce3b976f22e2e146ef6eea6 in 923ms, sequenceid=18, compaction requested=false 2024-11-23T13:23:34,225 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3fb74ed11ce3b976f22e2e146ef6eea6: 2024-11-23T13:23:34,355 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:34,356 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-23T13:23:34,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 
2024-11-23T13:23:34,356 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2837): Flushing 3fb74ed11ce3b976f22e2e146ef6eea6 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-23T13:23:34,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3fb74ed11ce3b976f22e2e146ef6eea6, store=A 2024-11-23T13:23:34,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:34,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3fb74ed11ce3b976f22e2e146ef6eea6, store=B 2024-11-23T13:23:34,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:34,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3fb74ed11ce3b976f22e2e146ef6eea6, store=C 2024-11-23T13:23:34,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:34,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123998a685ac39045a79b00a69b633b31d0_3fb74ed11ce3b976f22e2e146ef6eea6 is 50, key is test_row_0/A:col10/1732368213315/Put/seqid=0 2024-11-23T13:23:34,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742390_1566 (size=12154) 2024-11-23T13:23:34,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-23T13:23:34,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:34,429 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. as already flushing 2024-11-23T13:23:34,462 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:34,462 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:34,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39342 deadline: 1732368274433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:34,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39316 deadline: 1732368274433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:34,465 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:34,465 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:34,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39372 deadline: 1732368274463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:34,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39352 deadline: 1732368274463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:34,465 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:34,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39330 deadline: 1732368274463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:34,565 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:34,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39342 deadline: 1732368274563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:34,565 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:34,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39316 deadline: 1732368274564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:34,568 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:34,568 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:34,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39372 deadline: 1732368274566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:34,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39352 deadline: 1732368274566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:34,568 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:34,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39330 deadline: 1732368274566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:34,767 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:34,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39342 deadline: 1732368274766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:34,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:34,768 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:34,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39316 deadline: 1732368274767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:34,770 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:34,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39352 deadline: 1732368274769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:34,770 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:34,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39372 deadline: 1732368274770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:34,771 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:34,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39330 deadline: 1732368274770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:34,771 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123998a685ac39045a79b00a69b633b31d0_3fb74ed11ce3b976f22e2e146ef6eea6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123998a685ac39045a79b00a69b633b31d0_3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:34,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/011d29a5d925475f97d42ee19f072e85, store: [table=TestAcidGuarantees family=A region=3fb74ed11ce3b976f22e2e146ef6eea6] 2024-11-23T13:23:34,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/011d29a5d925475f97d42ee19f072e85 is 175, key is test_row_0/A:col10/1732368213315/Put/seqid=0 2024-11-23T13:23:34,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742391_1567 (size=30955) 2024-11-23T13:23:34,777 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=41, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/011d29a5d925475f97d42ee19f072e85 2024-11-23T13:23:34,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/B/2ddce37102f5499388140625ae8d165f is 50, key is test_row_0/B:col10/1732368213315/Put/seqid=0 2024-11-23T13:23:34,785 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742392_1568 (size=12001) 2024-11-23T13:23:34,993 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-23T13:23:35,070 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:35,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39316 deadline: 1732368275069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:35,071 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:35,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39342 deadline: 1732368275069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:35,073 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:35,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39352 deadline: 1732368275071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:35,073 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:35,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39372 deadline: 1732368275071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:35,075 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:35,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39330 deadline: 1732368275074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:35,186 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/B/2ddce37102f5499388140625ae8d165f 2024-11-23T13:23:35,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/C/c0464ac6dba54ffc948d0a4ebb63a298 is 50, key is test_row_0/C:col10/1732368213315/Put/seqid=0 2024-11-23T13:23:35,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742393_1569 (size=12001) 2024-11-23T13:23:35,197 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/C/c0464ac6dba54ffc948d0a4ebb63a298 2024-11-23T13:23:35,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/011d29a5d925475f97d42ee19f072e85 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/011d29a5d925475f97d42ee19f072e85 2024-11-23T13:23:35,204 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/011d29a5d925475f97d42ee19f072e85, entries=150, sequenceid=41, filesize=30.2 K 2024-11-23T13:23:35,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/B/2ddce37102f5499388140625ae8d165f as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/2ddce37102f5499388140625ae8d165f 2024-11-23T13:23:35,208 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/2ddce37102f5499388140625ae8d165f, entries=150, sequenceid=41, filesize=11.7 K 2024-11-23T13:23:35,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/C/c0464ac6dba54ffc948d0a4ebb63a298 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/c0464ac6dba54ffc948d0a4ebb63a298 2024-11-23T13:23:35,213 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/c0464ac6dba54ffc948d0a4ebb63a298, entries=150, sequenceid=41, filesize=11.7 K 2024-11-23T13:23:35,214 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 3fb74ed11ce3b976f22e2e146ef6eea6 in 858ms, sequenceid=41, compaction requested=false 2024-11-23T13:23:35,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2538): Flush status journal for 3fb74ed11ce3b976f22e2e146ef6eea6: 2024-11-23T13:23:35,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 
2024-11-23T13:23:35,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=160 2024-11-23T13:23:35,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4106): Remote procedure done, pid=160 2024-11-23T13:23:35,220 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=160, resume processing ppid=159 2024-11-23T13:23:35,220 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=160, ppid=159, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9310 sec 2024-11-23T13:23:35,221 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=159, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees in 1.9340 sec 2024-11-23T13:23:35,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-23T13:23:35,390 INFO [Thread-2489 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 159 completed 2024-11-23T13:23:35,392 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T13:23:35,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=161, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees 2024-11-23T13:23:35,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-23T13:23:35,393 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=161, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T13:23:35,394 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=161, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T13:23:35,394 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=162, ppid=161, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T13:23:35,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-23T13:23:35,545 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:35,546 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-23T13:23:35,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 
2024-11-23T13:23:35,546 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2837): Flushing 3fb74ed11ce3b976f22e2e146ef6eea6 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-23T13:23:35,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3fb74ed11ce3b976f22e2e146ef6eea6, store=A 2024-11-23T13:23:35,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:35,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3fb74ed11ce3b976f22e2e146ef6eea6, store=B 2024-11-23T13:23:35,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:35,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3fb74ed11ce3b976f22e2e146ef6eea6, store=C 2024-11-23T13:23:35,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:35,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411236c1f0d7ed24c49279715a7a77cdaf78d_3fb74ed11ce3b976f22e2e146ef6eea6 is 50, key is test_row_0/A:col10/1732368214433/Put/seqid=0 2024-11-23T13:23:35,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742394_1570 (size=12154) 2024-11-23T13:23:35,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:35,573 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. as already flushing 2024-11-23T13:23:35,590 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:35,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39316 deadline: 1732368275587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:35,592 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:35,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39330 deadline: 1732368275589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:35,592 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:35,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39372 deadline: 1732368275589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:35,592 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:35,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39342 deadline: 1732368275590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:35,593 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:35,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39352 deadline: 1732368275590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:35,692 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:35,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39316 deadline: 1732368275691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:35,694 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:35,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39330 deadline: 1732368275693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:35,694 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:35,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-23T13:23:35,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39372 deadline: 1732368275693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:35,695 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:35,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39342 deadline: 1732368275693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:35,695 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:35,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39352 deadline: 1732368275693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:35,894 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:35,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39316 deadline: 1732368275893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:35,897 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:35,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39330 deadline: 1732368275895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:35,897 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:35,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39372 deadline: 1732368275896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:35,898 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:35,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39342 deadline: 1732368275897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:35,899 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:35,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39352 deadline: 1732368275897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:35,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:35,960 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411236c1f0d7ed24c49279715a7a77cdaf78d_3fb74ed11ce3b976f22e2e146ef6eea6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411236c1f0d7ed24c49279715a7a77cdaf78d_3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:35,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/db413775ab134f7abd383a01c5fc0061, store: [table=TestAcidGuarantees family=A region=3fb74ed11ce3b976f22e2e146ef6eea6] 2024-11-23T13:23:35,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/db413775ab134f7abd383a01c5fc0061 is 175, key is test_row_0/A:col10/1732368214433/Put/seqid=0 2024-11-23T13:23:35,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742395_1571 (size=30955) 2024-11-23T13:23:35,971 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=54, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/db413775ab134f7abd383a01c5fc0061 2024-11-23T13:23:35,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/B/0caebd9a24c9413494651d9973323e2d is 50, key is test_row_0/B:col10/1732368214433/Put/seqid=0 2024-11-23T13:23:35,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742396_1572 (size=12001) 2024-11-23T13:23:35,981 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/B/0caebd9a24c9413494651d9973323e2d 2024-11-23T13:23:35,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/C/432f977297ed462eb3e8742353a7671a is 50, key is test_row_0/C:col10/1732368214433/Put/seqid=0 2024-11-23T13:23:35,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742397_1573 (size=12001) 2024-11-23T13:23:35,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-23T13:23:36,197 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:36,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39316 deadline: 1732368276196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:36,200 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:36,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39330 deadline: 1732368276199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:36,202 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:36,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39372 deadline: 1732368276200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:36,202 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:36,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39352 deadline: 1732368276201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:36,202 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:36,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39342 deadline: 1732368276201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:36,390 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/C/432f977297ed462eb3e8742353a7671a 2024-11-23T13:23:36,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/db413775ab134f7abd383a01c5fc0061 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/db413775ab134f7abd383a01c5fc0061 2024-11-23T13:23:36,398 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/db413775ab134f7abd383a01c5fc0061, entries=150, sequenceid=54, filesize=30.2 K 2024-11-23T13:23:36,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/B/0caebd9a24c9413494651d9973323e2d as 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/0caebd9a24c9413494651d9973323e2d 2024-11-23T13:23:36,402 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/0caebd9a24c9413494651d9973323e2d, entries=150, sequenceid=54, filesize=11.7 K 2024-11-23T13:23:36,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/C/432f977297ed462eb3e8742353a7671a as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/432f977297ed462eb3e8742353a7671a 2024-11-23T13:23:36,405 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/432f977297ed462eb3e8742353a7671a, entries=150, sequenceid=54, filesize=11.7 K 2024-11-23T13:23:36,406 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 3fb74ed11ce3b976f22e2e146ef6eea6 in 860ms, sequenceid=54, compaction requested=true 2024-11-23T13:23:36,406 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2538): Flush status journal for 3fb74ed11ce3b976f22e2e146ef6eea6: 2024-11-23T13:23:36,406 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 
2024-11-23T13:23:36,406 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=162 2024-11-23T13:23:36,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4106): Remote procedure done, pid=162 2024-11-23T13:23:36,408 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=162, resume processing ppid=161 2024-11-23T13:23:36,408 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=162, ppid=161, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0130 sec 2024-11-23T13:23:36,409 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=161, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees in 1.0160 sec 2024-11-23T13:23:36,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-23T13:23:36,496 INFO [Thread-2489 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 161 completed 2024-11-23T13:23:36,497 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T13:23:36,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees 2024-11-23T13:23:36,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-23T13:23:36,499 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T13:23:36,499 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T13:23:36,499 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=164, ppid=163, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T13:23:36,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-23T13:23:36,651 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:36,651 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-23T13:23:36,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 
2024-11-23T13:23:36,652 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2837): Flushing 3fb74ed11ce3b976f22e2e146ef6eea6 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-23T13:23:36,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3fb74ed11ce3b976f22e2e146ef6eea6, store=A 2024-11-23T13:23:36,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:36,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3fb74ed11ce3b976f22e2e146ef6eea6, store=B 2024-11-23T13:23:36,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:36,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3fb74ed11ce3b976f22e2e146ef6eea6, store=C 2024-11-23T13:23:36,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:36,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123bae928c3d5504437af072bfc5826dd62_3fb74ed11ce3b976f22e2e146ef6eea6 is 50, key is test_row_0/A:col10/1732368215586/Put/seqid=0 2024-11-23T13:23:36,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742398_1574 (size=12154) 2024-11-23T13:23:36,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:36,702 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. as already flushing 2024-11-23T13:23:36,712 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:36,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39330 deadline: 1732368276708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:36,712 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:36,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39316 deadline: 1732368276709, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:36,713 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:36,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39352 deadline: 1732368276711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:36,716 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:36,716 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:36,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39372 deadline: 1732368276712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:36,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39342 deadline: 1732368276712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:36,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-23T13:23:36,814 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:36,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39316 deadline: 1732368276813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:36,815 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:36,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39352 deadline: 1732368276814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:36,818 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:36,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39372 deadline: 1732368276817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:36,818 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:36,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39342 deadline: 1732368276817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:37,018 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:37,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39316 deadline: 1732368277016, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:37,018 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:37,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39352 deadline: 1732368277016, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:37,021 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:37,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39372 deadline: 1732368277020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:37,021 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:37,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39342 deadline: 1732368277020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:37,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:37,066 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123bae928c3d5504437af072bfc5826dd62_3fb74ed11ce3b976f22e2e146ef6eea6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123bae928c3d5504437af072bfc5826dd62_3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:37,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/4bb8871e3a284085ba0526e980a42c84, store: [table=TestAcidGuarantees family=A region=3fb74ed11ce3b976f22e2e146ef6eea6] 2024-11-23T13:23:37,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/4bb8871e3a284085ba0526e980a42c84 is 175, key is test_row_0/A:col10/1732368215586/Put/seqid=0 2024-11-23T13:23:37,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742399_1575 (size=30955) 2024-11-23T13:23:37,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-23T13:23:37,320 WARN 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:37,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39316 deadline: 1732368277319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:37,323 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:37,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39352 deadline: 1732368277321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:37,323 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:37,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39372 deadline: 1732368277322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:37,326 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:37,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39342 deadline: 1732368277324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:37,474 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=78, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/4bb8871e3a284085ba0526e980a42c84 2024-11-23T13:23:37,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/B/a8fed4e12d13411b894ed8a2b6ad2b79 is 50, key is test_row_0/B:col10/1732368215586/Put/seqid=0 2024-11-23T13:23:37,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742400_1576 (size=12001) 2024-11-23T13:23:37,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-23T13:23:37,718 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:37,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39330 deadline: 1732368277717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:37,824 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:37,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39316 deadline: 1732368277822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:37,827 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:37,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39372 deadline: 1732368277825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:37,827 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:37,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39352 deadline: 1732368277826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:37,829 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:37,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39342 deadline: 1732368277828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:37,885 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/B/a8fed4e12d13411b894ed8a2b6ad2b79 2024-11-23T13:23:37,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/C/822762eca48448b2b8fa4d488bc511b8 is 50, key is test_row_0/C:col10/1732368215586/Put/seqid=0 2024-11-23T13:23:37,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742401_1577 (size=12001) 2024-11-23T13:23:38,299 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/C/822762eca48448b2b8fa4d488bc511b8 2024-11-23T13:23:38,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/4bb8871e3a284085ba0526e980a42c84 as 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/4bb8871e3a284085ba0526e980a42c84 2024-11-23T13:23:38,307 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/4bb8871e3a284085ba0526e980a42c84, entries=150, sequenceid=78, filesize=30.2 K 2024-11-23T13:23:38,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/B/a8fed4e12d13411b894ed8a2b6ad2b79 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/a8fed4e12d13411b894ed8a2b6ad2b79 2024-11-23T13:23:38,310 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/a8fed4e12d13411b894ed8a2b6ad2b79, entries=150, sequenceid=78, filesize=11.7 K 2024-11-23T13:23:38,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/C/822762eca48448b2b8fa4d488bc511b8 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/822762eca48448b2b8fa4d488bc511b8 2024-11-23T13:23:38,315 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/822762eca48448b2b8fa4d488bc511b8, entries=150, sequenceid=78, filesize=11.7 K 2024-11-23T13:23:38,315 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 3fb74ed11ce3b976f22e2e146ef6eea6 in 1664ms, sequenceid=78, compaction requested=true 2024-11-23T13:23:38,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2538): Flush status journal for 3fb74ed11ce3b976f22e2e146ef6eea6: 2024-11-23T13:23:38,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 
2024-11-23T13:23:38,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=164 2024-11-23T13:23:38,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4106): Remote procedure done, pid=164 2024-11-23T13:23:38,318 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=164, resume processing ppid=163 2024-11-23T13:23:38,318 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=164, ppid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8170 sec 2024-11-23T13:23:38,319 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees in 1.8210 sec 2024-11-23T13:23:38,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-23T13:23:38,602 INFO [Thread-2489 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 163 completed 2024-11-23T13:23:38,603 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T13:23:38,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees 2024-11-23T13:23:38,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-23T13:23:38,605 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T13:23:38,605 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T13:23:38,605 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=166, ppid=165, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T13:23:38,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-23T13:23:38,757 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:38,757 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-23T13:23:38,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 
2024-11-23T13:23:38,758 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2837): Flushing 3fb74ed11ce3b976f22e2e146ef6eea6 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-23T13:23:38,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3fb74ed11ce3b976f22e2e146ef6eea6, store=A 2024-11-23T13:23:38,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:38,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3fb74ed11ce3b976f22e2e146ef6eea6, store=B 2024-11-23T13:23:38,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:38,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3fb74ed11ce3b976f22e2e146ef6eea6, store=C 2024-11-23T13:23:38,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:38,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123a617eaf743f24252a548772f82694f55_3fb74ed11ce3b976f22e2e146ef6eea6 is 50, key is test_row_0/A:col10/1732368216706/Put/seqid=0 2024-11-23T13:23:38,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742402_1578 (size=12154) 2024-11-23T13:23:38,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:38,832 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. as already flushing 2024-11-23T13:23:38,851 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:38,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39372 deadline: 1732368278848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:38,851 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:38,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39316 deadline: 1732368278848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:38,852 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:38,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39342 deadline: 1732368278849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:38,852 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:38,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39352 deadline: 1732368278849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:38,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-23T13:23:38,954 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:38,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39372 deadline: 1732368278952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:38,955 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:38,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39316 deadline: 1732368278952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:38,955 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:38,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39342 deadline: 1732368278953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:38,955 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:38,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39352 deadline: 1732368278953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:39,158 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:39,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39352 deadline: 1732368279156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:39,158 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:39,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39372 deadline: 1732368279156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:39,159 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:39,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39316 deadline: 1732368279156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:39,159 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:39,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39342 deadline: 1732368279157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:39,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:39,172 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123a617eaf743f24252a548772f82694f55_3fb74ed11ce3b976f22e2e146ef6eea6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123a617eaf743f24252a548772f82694f55_3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:39,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/23da3d253bce40f3ab741a172550195b, store: [table=TestAcidGuarantees family=A region=3fb74ed11ce3b976f22e2e146ef6eea6] 2024-11-23T13:23:39,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/23da3d253bce40f3ab741a172550195b is 175, key is test_row_0/A:col10/1732368216706/Put/seqid=0 2024-11-23T13:23:39,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742403_1579 (size=30955) 2024-11-23T13:23:39,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-23T13:23:39,461 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:39,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39316 deadline: 1732368279461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:39,461 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:39,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39342 deadline: 1732368279461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:39,462 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:39,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39352 deadline: 1732368279461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:39,463 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:39,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39372 deadline: 1732368279462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:39,584 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=91, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/23da3d253bce40f3ab741a172550195b 2024-11-23T13:23:39,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/B/94e03dfa200e473987f11313d7f906e2 is 50, key is test_row_0/B:col10/1732368216706/Put/seqid=0 2024-11-23T13:23:39,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742404_1580 (size=12001) 2024-11-23T13:23:39,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-23T13:23:39,726 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:39,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39330 deadline: 1732368279725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:39,727 DEBUG [Thread-2483 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4138 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6., hostname=ba2e440802a7,33173,1732368061317, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T13:23:39,964 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:39,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39352 deadline: 1732368279963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:39,966 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:39,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39342 deadline: 1732368279964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:39,967 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:39,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39316 deadline: 1732368279966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:39,970 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:39,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39372 deadline: 1732368279968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:39,994 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/B/94e03dfa200e473987f11313d7f906e2 2024-11-23T13:23:40,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/C/3d8f2d06580a41c69b46cab7e8904695 is 50, key is test_row_0/C:col10/1732368216706/Put/seqid=0 2024-11-23T13:23:40,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742405_1581 (size=12001) 2024-11-23T13:23:40,408 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/C/3d8f2d06580a41c69b46cab7e8904695 2024-11-23T13:23:40,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/23da3d253bce40f3ab741a172550195b as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/23da3d253bce40f3ab741a172550195b 2024-11-23T13:23:40,415 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/23da3d253bce40f3ab741a172550195b, entries=150, sequenceid=91, filesize=30.2 K 2024-11-23T13:23:40,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/B/94e03dfa200e473987f11313d7f906e2 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/94e03dfa200e473987f11313d7f906e2 2024-11-23T13:23:40,419 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/94e03dfa200e473987f11313d7f906e2, entries=150, sequenceid=91, filesize=11.7 K 2024-11-23T13:23:40,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/C/3d8f2d06580a41c69b46cab7e8904695 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/3d8f2d06580a41c69b46cab7e8904695 2024-11-23T13:23:40,423 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/3d8f2d06580a41c69b46cab7e8904695, entries=150, sequenceid=91, filesize=11.7 K 2024-11-23T13:23:40,424 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 3fb74ed11ce3b976f22e2e146ef6eea6 in 1666ms, sequenceid=91, compaction requested=true 2024-11-23T13:23:40,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2538): Flush status journal for 3fb74ed11ce3b976f22e2e146ef6eea6: 2024-11-23T13:23:40,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 
2024-11-23T13:23:40,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=166 2024-11-23T13:23:40,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4106): Remote procedure done, pid=166 2024-11-23T13:23:40,426 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=166, resume processing ppid=165 2024-11-23T13:23:40,426 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=166, ppid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8200 sec 2024-11-23T13:23:40,427 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees in 1.8240 sec 2024-11-23T13:23:40,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-23T13:23:40,709 INFO [Thread-2489 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 165 completed 2024-11-23T13:23:40,710 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T13:23:40,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees 2024-11-23T13:23:40,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-23T13:23:40,711 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T13:23:40,711 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T13:23:40,712 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=168, ppid=167, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T13:23:40,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-23T13:23:40,863 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:40,863 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-23T13:23:40,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 
2024-11-23T13:23:40,864 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2837): Flushing 3fb74ed11ce3b976f22e2e146ef6eea6 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-23T13:23:40,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3fb74ed11ce3b976f22e2e146ef6eea6, store=A 2024-11-23T13:23:40,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:40,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3fb74ed11ce3b976f22e2e146ef6eea6, store=B 2024-11-23T13:23:40,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:40,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3fb74ed11ce3b976f22e2e146ef6eea6, store=C 2024-11-23T13:23:40,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:40,870 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123f095e7e963c44f70923bff6e3e464ec0_3fb74ed11ce3b976f22e2e146ef6eea6 is 50, key is test_row_0/A:col10/1732368218843/Put/seqid=0 2024-11-23T13:23:40,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742406_1582 (size=12154) 2024-11-23T13:23:40,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:40,972 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. as already flushing 2024-11-23T13:23:40,984 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:40,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39316 deadline: 1732368280980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:40,986 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:40,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39352 deadline: 1732368280983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:40,986 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:40,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39342 deadline: 1732368280983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:40,987 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:40,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39372 deadline: 1732368280984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:41,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-23T13:23:41,074 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-23T13:23:41,075 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-11-23T13:23:41,087 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:41,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39316 deadline: 1732368281085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:41,088 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:41,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39352 deadline: 1732368281087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:41,089 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:41,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39342 deadline: 1732368281087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:41,090 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:41,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39372 deadline: 1732368281088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:41,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:41,277 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123f095e7e963c44f70923bff6e3e464ec0_3fb74ed11ce3b976f22e2e146ef6eea6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123f095e7e963c44f70923bff6e3e464ec0_3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:41,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/9d93ddadc0f14768aaaf54a30885e219, store: [table=TestAcidGuarantees family=A region=3fb74ed11ce3b976f22e2e146ef6eea6] 2024-11-23T13:23:41,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/9d93ddadc0f14768aaaf54a30885e219 is 175, key is test_row_0/A:col10/1732368218843/Put/seqid=0 2024-11-23T13:23:41,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742407_1583 (size=30955) 2024-11-23T13:23:41,291 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:41,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39316 deadline: 1732368281289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:41,294 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:41,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39352 deadline: 1732368281290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:41,294 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:41,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39342 deadline: 1732368281291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:41,294 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:41,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39372 deadline: 1732368281292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:41,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-23T13:23:41,594 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:41,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39316 deadline: 1732368281593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:41,597 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:41,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39342 deadline: 1732368281595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:41,597 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:41,597 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:41,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39372 deadline: 1732368281595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:41,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39352 deadline: 1732368281595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:41,685 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=114, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/9d93ddadc0f14768aaaf54a30885e219 2024-11-23T13:23:41,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/B/595677f3503d4cb5833d559d94ba8d4b is 50, key is test_row_0/B:col10/1732368218843/Put/seqid=0 2024-11-23T13:23:41,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742408_1584 (size=12001) 2024-11-23T13:23:41,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-23T13:23:42,098 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:42,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39316 deadline: 1732368282097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:42,104 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/B/595677f3503d4cb5833d559d94ba8d4b 2024-11-23T13:23:42,104 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:42,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39342 deadline: 1732368282102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:42,105 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:42,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39372 deadline: 1732368282102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:42,105 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:42,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39352 deadline: 1732368282102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:42,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/C/94c7a59b011d4e31829643088c32e71f is 50, key is test_row_0/C:col10/1732368218843/Put/seqid=0 2024-11-23T13:23:42,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742409_1585 (size=12001) 2024-11-23T13:23:42,515 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/C/94c7a59b011d4e31829643088c32e71f 2024-11-23T13:23:42,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/9d93ddadc0f14768aaaf54a30885e219 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/9d93ddadc0f14768aaaf54a30885e219 2024-11-23T13:23:42,522 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/9d93ddadc0f14768aaaf54a30885e219, entries=150, sequenceid=114, filesize=30.2 K 2024-11-23T13:23:42,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/B/595677f3503d4cb5833d559d94ba8d4b as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/595677f3503d4cb5833d559d94ba8d4b 2024-11-23T13:23:42,526 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/595677f3503d4cb5833d559d94ba8d4b, entries=150, sequenceid=114, filesize=11.7 K 2024-11-23T13:23:42,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/C/94c7a59b011d4e31829643088c32e71f as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/94c7a59b011d4e31829643088c32e71f 2024-11-23T13:23:42,529 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/94c7a59b011d4e31829643088c32e71f, entries=150, sequenceid=114, filesize=11.7 K 2024-11-23T13:23:42,530 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for 3fb74ed11ce3b976f22e2e146ef6eea6 in 1666ms, sequenceid=114, compaction requested=true 2024-11-23T13:23:42,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2538): Flush status journal for 3fb74ed11ce3b976f22e2e146ef6eea6: 2024-11-23T13:23:42,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 
2024-11-23T13:23:42,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=168 2024-11-23T13:23:42,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4106): Remote procedure done, pid=168 2024-11-23T13:23:42,532 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=168, resume processing ppid=167 2024-11-23T13:23:42,533 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=168, ppid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8190 sec 2024-11-23T13:23:42,534 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees in 1.8230 sec 2024-11-23T13:23:42,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-23T13:23:42,815 INFO [Thread-2489 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 167 completed 2024-11-23T13:23:42,816 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T13:23:42,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=169, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees 2024-11-23T13:23:42,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-23T13:23:42,817 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=169, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T13:23:42,817 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=169, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T13:23:42,818 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T13:23:42,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-23T13:23:42,969 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:42,969 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-23T13:23:42,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 
2024-11-23T13:23:42,970 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2837): Flushing 3fb74ed11ce3b976f22e2e146ef6eea6 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-23T13:23:42,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3fb74ed11ce3b976f22e2e146ef6eea6, store=A 2024-11-23T13:23:42,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:42,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3fb74ed11ce3b976f22e2e146ef6eea6, store=B 2024-11-23T13:23:42,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:42,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3fb74ed11ce3b976f22e2e146ef6eea6, store=C 2024-11-23T13:23:42,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:42,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411237442b66dadca452fb0c2bcb88fbde753_3fb74ed11ce3b976f22e2e146ef6eea6 is 50, key is test_row_0/A:col10/1732368220983/Put/seqid=0 2024-11-23T13:23:42,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742410_1586 (size=12154) 2024-11-23T13:23:43,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:43,106 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. as already flushing 2024-11-23T13:23:43,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-23T13:23:43,135 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:43,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39352 deadline: 1732368283130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:43,136 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:43,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39372 deadline: 1732368283130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:43,136 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:43,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39342 deadline: 1732368283131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:43,136 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:43,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39316 deadline: 1732368283132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:43,237 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:43,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39352 deadline: 1732368283236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:43,238 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:43,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39372 deadline: 1732368283237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:43,238 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:43,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39342 deadline: 1732368283237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:43,239 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:43,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39316 deadline: 1732368283237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:43,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:43,383 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411237442b66dadca452fb0c2bcb88fbde753_3fb74ed11ce3b976f22e2e146ef6eea6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411237442b66dadca452fb0c2bcb88fbde753_3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:43,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/7c33c97fe37d4491a5e3849a3b28d64c, store: [table=TestAcidGuarantees family=A region=3fb74ed11ce3b976f22e2e146ef6eea6] 2024-11-23T13:23:43,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/7c33c97fe37d4491a5e3849a3b28d64c is 175, key is test_row_0/A:col10/1732368220983/Put/seqid=0 2024-11-23T13:23:43,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742411_1587 (size=30955) 2024-11-23T13:23:43,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-23T13:23:43,440 WARN 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:43,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39352 deadline: 1732368283438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:43,441 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:43,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39372 deadline: 1732368283439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:43,441 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:43,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39342 deadline: 1732368283440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:43,441 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:43,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39316 deadline: 1732368283440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:43,744 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:43,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39372 deadline: 1732368283742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:43,745 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:43,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39352 deadline: 1732368283742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:43,745 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:43,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39342 deadline: 1732368283743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:43,746 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:43,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39316 deadline: 1732368283743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:43,751 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:43,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39330 deadline: 1732368283750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:43,752 DEBUG [Thread-2483 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8162 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6., hostname=ba2e440802a7,33173,1732368061317, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T13:23:43,791 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=128, memsize=24.6 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/7c33c97fe37d4491a5e3849a3b28d64c 2024-11-23T13:23:43,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/B/8b118e068dff42fabe0a6d5fdf164580 is 50, key is test_row_0/B:col10/1732368220983/Put/seqid=0 2024-11-23T13:23:43,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742412_1588 (size=12001) 2024-11-23T13:23:43,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-23T13:23:44,201 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=128 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/B/8b118e068dff42fabe0a6d5fdf164580 2024-11-23T13:23:44,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/C/e47bb9e303e14a629af700a0108460dd is 50, key is test_row_0/C:col10/1732368220983/Put/seqid=0 2024-11-23T13:23:44,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742413_1589 (size=12001) 2024-11-23T13:23:44,247 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:44,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39372 deadline: 1732368284245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:44,248 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:44,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39342 deadline: 1732368284247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:44,250 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:44,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39352 deadline: 1732368284248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:44,250 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:44,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39316 deadline: 1732368284249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:44,611 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=128 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/C/e47bb9e303e14a629af700a0108460dd 2024-11-23T13:23:44,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/7c33c97fe37d4491a5e3849a3b28d64c as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/7c33c97fe37d4491a5e3849a3b28d64c 2024-11-23T13:23:44,618 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/7c33c97fe37d4491a5e3849a3b28d64c, entries=150, sequenceid=128, filesize=30.2 K 2024-11-23T13:23:44,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/B/8b118e068dff42fabe0a6d5fdf164580 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/8b118e068dff42fabe0a6d5fdf164580 2024-11-23T13:23:44,622 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/8b118e068dff42fabe0a6d5fdf164580, entries=150, sequenceid=128, filesize=11.7 K 2024-11-23T13:23:44,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/C/e47bb9e303e14a629af700a0108460dd as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/e47bb9e303e14a629af700a0108460dd 2024-11-23T13:23:44,626 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/e47bb9e303e14a629af700a0108460dd, entries=150, sequenceid=128, filesize=11.7 K 2024-11-23T13:23:44,626 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 3fb74ed11ce3b976f22e2e146ef6eea6 in 1657ms, sequenceid=128, compaction requested=true 2024-11-23T13:23:44,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2538): Flush status journal for 3fb74ed11ce3b976f22e2e146ef6eea6: 2024-11-23T13:23:44,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 2024-11-23T13:23:44,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=170 2024-11-23T13:23:44,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4106): Remote procedure done, pid=170 2024-11-23T13:23:44,629 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=170, resume processing ppid=169 2024-11-23T13:23:44,629 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=170, ppid=169, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8090 sec 2024-11-23T13:23:44,630 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=169, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees in 1.8130 sec 2024-11-23T13:23:44,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-23T13:23:44,921 INFO [Thread-2489 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 169 completed 2024-11-23T13:23:44,922 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T13:23:44,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=171, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees 2024-11-23T13:23:44,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-23T13:23:44,923 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=171, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T13:23:44,924 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=171, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T13:23:44,924 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=172, ppid=171, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T13:23:45,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-23T13:23:45,075 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:45,076 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-23T13:23:45,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 2024-11-23T13:23:45,076 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2837): Flushing 3fb74ed11ce3b976f22e2e146ef6eea6 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-23T13:23:45,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3fb74ed11ce3b976f22e2e146ef6eea6, store=A 2024-11-23T13:23:45,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:45,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3fb74ed11ce3b976f22e2e146ef6eea6, store=B 2024-11-23T13:23:45,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:45,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3fb74ed11ce3b976f22e2e146ef6eea6, store=C 2024-11-23T13:23:45,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:45,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411239665e3fd3d884a65a447de1868366876_3fb74ed11ce3b976f22e2e146ef6eea6 is 50, key is test_row_0/A:col10/1732368223130/Put/seqid=0 2024-11-23T13:23:45,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:44873 is added to blk_1073742414_1590 (size=12304) 2024-11-23T13:23:45,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-23T13:23:45,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:45,253 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. as already flushing 2024-11-23T13:23:45,265 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:45,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39372 deadline: 1732368285262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:45,266 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:45,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39342 deadline: 1732368285263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:45,266 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:45,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39316 deadline: 1732368285263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:45,268 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:45,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39352 deadline: 1732368285265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:45,369 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:45,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39372 deadline: 1732368285366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:45,370 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:45,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39342 deadline: 1732368285367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:45,370 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:45,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39316 deadline: 1732368285367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:45,372 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:45,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39352 deadline: 1732368285369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:45,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:45,492 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411239665e3fd3d884a65a447de1868366876_3fb74ed11ce3b976f22e2e146ef6eea6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411239665e3fd3d884a65a447de1868366876_3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:45,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/82d375a02c6a4edb851a4cad96eee64f, store: [table=TestAcidGuarantees family=A region=3fb74ed11ce3b976f22e2e146ef6eea6] 2024-11-23T13:23:45,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/82d375a02c6a4edb851a4cad96eee64f is 175, key is test_row_0/A:col10/1732368223130/Put/seqid=0 2024-11-23T13:23:45,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742415_1591 (size=31105) 2024-11-23T13:23:45,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-23T13:23:45,572 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:45,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39372 deadline: 1732368285570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:45,574 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:45,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39316 deadline: 1732368285571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:45,574 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:45,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39342 deadline: 1732368285572, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:45,575 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:45,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39352 deadline: 1732368285573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:45,877 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:45,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39372 deadline: 1732368285874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:45,877 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:45,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39316 deadline: 1732368285875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:45,877 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:45,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39342 deadline: 1732368285875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:45,879 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:45,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39352 deadline: 1732368285876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:45,904 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=150, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/82d375a02c6a4edb851a4cad96eee64f 2024-11-23T13:23:45,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/B/a2c942c500324e9b835936c2b4223c35 is 50, key is test_row_0/B:col10/1732368223130/Put/seqid=0 2024-11-23T13:23:45,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742416_1592 (size=12151) 2024-11-23T13:23:46,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-23T13:23:46,326 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=150 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/B/a2c942c500324e9b835936c2b4223c35 2024-11-23T13:23:46,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/C/8a5cb15b23ec4a1b8c61249554903891 is 50, key is test_row_0/C:col10/1732368223130/Put/seqid=0 2024-11-23T13:23:46,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742417_1593 (size=12151) 2024-11-23T13:23:46,379 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:46,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39342 deadline: 1732368286379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:46,386 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:46,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39316 deadline: 1732368286384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:46,386 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:46,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39372 deadline: 1732368286384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:46,386 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:46,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39352 deadline: 1732368286384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:46,737 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=150 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/C/8a5cb15b23ec4a1b8c61249554903891 2024-11-23T13:23:46,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/82d375a02c6a4edb851a4cad96eee64f as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/82d375a02c6a4edb851a4cad96eee64f 2024-11-23T13:23:46,744 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/82d375a02c6a4edb851a4cad96eee64f, entries=150, sequenceid=150, filesize=30.4 K 2024-11-23T13:23:46,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/B/a2c942c500324e9b835936c2b4223c35 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/a2c942c500324e9b835936c2b4223c35 2024-11-23T13:23:46,747 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/a2c942c500324e9b835936c2b4223c35, entries=150, sequenceid=150, filesize=11.9 K 2024-11-23T13:23:46,748 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/C/8a5cb15b23ec4a1b8c61249554903891 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/8a5cb15b23ec4a1b8c61249554903891 2024-11-23T13:23:46,752 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/8a5cb15b23ec4a1b8c61249554903891, entries=150, sequenceid=150, filesize=11.9 K 2024-11-23T13:23:46,753 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 3fb74ed11ce3b976f22e2e146ef6eea6 in 1677ms, sequenceid=150, compaction requested=true 2024-11-23T13:23:46,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2538): Flush status journal for 3fb74ed11ce3b976f22e2e146ef6eea6: 2024-11-23T13:23:46,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 
2024-11-23T13:23:46,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=172 2024-11-23T13:23:46,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4106): Remote procedure done, pid=172 2024-11-23T13:23:46,756 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=172, resume processing ppid=171 2024-11-23T13:23:46,756 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=172, ppid=171, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8300 sec 2024-11-23T13:23:46,757 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=171, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees in 1.8340 sec 2024-11-23T13:23:47,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-23T13:23:47,027 INFO [Thread-2489 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 171 completed 2024-11-23T13:23:47,028 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T13:23:47,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=173, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees 2024-11-23T13:23:47,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-23T13:23:47,029 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=173, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T13:23:47,030 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=173, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T13:23:47,030 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=174, ppid=173, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T13:23:47,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-23T13:23:47,181 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:47,182 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-23T13:23:47,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 
2024-11-23T13:23:47,182 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2837): Flushing 3fb74ed11ce3b976f22e2e146ef6eea6 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-23T13:23:47,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3fb74ed11ce3b976f22e2e146ef6eea6, store=A 2024-11-23T13:23:47,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:47,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3fb74ed11ce3b976f22e2e146ef6eea6, store=B 2024-11-23T13:23:47,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:47,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3fb74ed11ce3b976f22e2e146ef6eea6, store=C 2024-11-23T13:23:47,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:47,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411236d5b3d8247784e7d9fcd20432249ba6f_3fb74ed11ce3b976f22e2e146ef6eea6 is 50, key is test_row_0/A:col10/1732368225264/Put/seqid=0 2024-11-23T13:23:47,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742418_1594 (size=12304) 2024-11-23T13:23:47,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-23T13:23:47,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:47,384 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. as already flushing 2024-11-23T13:23:47,416 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:47,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39342 deadline: 1732368287411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:47,416 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:47,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39352 deadline: 1732368287413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:47,417 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:47,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39316 deadline: 1732368287414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:47,419 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:47,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39372 deadline: 1732368287416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:47,518 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:47,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39342 deadline: 1732368287517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:47,520 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:47,520 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:47,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39352 deadline: 1732368287518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:47,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39316 deadline: 1732368287518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:47,521 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:47,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39372 deadline: 1732368287520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:47,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:47,595 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411236d5b3d8247784e7d9fcd20432249ba6f_3fb74ed11ce3b976f22e2e146ef6eea6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411236d5b3d8247784e7d9fcd20432249ba6f_3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:47,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/e4188367c4b149f8b2aa5767d01c0c40, store: [table=TestAcidGuarantees family=A region=3fb74ed11ce3b976f22e2e146ef6eea6] 2024-11-23T13:23:47,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/e4188367c4b149f8b2aa5767d01c0c40 is 175, key is test_row_0/A:col10/1732368225264/Put/seqid=0 2024-11-23T13:23:47,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742419_1595 (size=31105) 2024-11-23T13:23:47,602 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=164, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/e4188367c4b149f8b2aa5767d01c0c40 2024-11-23T13:23:47,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/B/78a44d4a8e3040fd88cf297b80e19989 is 50, key is test_row_0/B:col10/1732368225264/Put/seqid=0 2024-11-23T13:23:47,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742420_1596 (size=12151) 2024-11-23T13:23:47,612 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/B/78a44d4a8e3040fd88cf297b80e19989 2024-11-23T13:23:47,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/C/87cc6a8c51c1482ab71f3af3498bd932 is 50, key is test_row_0/C:col10/1732368225264/Put/seqid=0 2024-11-23T13:23:47,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742421_1597 (size=12151) 2024-11-23T13:23:47,626 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/C/87cc6a8c51c1482ab71f3af3498bd932 2024-11-23T13:23:47,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/e4188367c4b149f8b2aa5767d01c0c40 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/e4188367c4b149f8b2aa5767d01c0c40 2024-11-23T13:23:47,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-23T13:23:47,633 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/e4188367c4b149f8b2aa5767d01c0c40, entries=150, sequenceid=164, filesize=30.4 K 2024-11-23T13:23:47,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/B/78a44d4a8e3040fd88cf297b80e19989 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/78a44d4a8e3040fd88cf297b80e19989 2024-11-23T13:23:47,636 INFO 
[RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/78a44d4a8e3040fd88cf297b80e19989, entries=150, sequenceid=164, filesize=11.9 K 2024-11-23T13:23:47,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/C/87cc6a8c51c1482ab71f3af3498bd932 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/87cc6a8c51c1482ab71f3af3498bd932 2024-11-23T13:23:47,640 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/87cc6a8c51c1482ab71f3af3498bd932, entries=150, sequenceid=164, filesize=11.9 K 2024-11-23T13:23:47,640 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 3fb74ed11ce3b976f22e2e146ef6eea6 in 458ms, sequenceid=164, compaction requested=true 2024-11-23T13:23:47,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2538): Flush status journal for 3fb74ed11ce3b976f22e2e146ef6eea6: 2024-11-23T13:23:47,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 
2024-11-23T13:23:47,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=174 2024-11-23T13:23:47,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4106): Remote procedure done, pid=174 2024-11-23T13:23:47,643 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=174, resume processing ppid=173 2024-11-23T13:23:47,643 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=174, ppid=173, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 611 msec 2024-11-23T13:23:47,644 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=173, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees in 615 msec 2024-11-23T13:23:47,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:47,722 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3fb74ed11ce3b976f22e2e146ef6eea6 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-23T13:23:47,723 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3fb74ed11ce3b976f22e2e146ef6eea6, store=A 2024-11-23T13:23:47,723 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:47,723 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3fb74ed11ce3b976f22e2e146ef6eea6, store=B 2024-11-23T13:23:47,723 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:47,723 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3fb74ed11ce3b976f22e2e146ef6eea6, store=C 2024-11-23T13:23:47,723 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:47,729 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123c5f14dea7c314acb93f088c35a7fc448_3fb74ed11ce3b976f22e2e146ef6eea6 is 50, key is test_row_0/A:col10/1732368227721/Put/seqid=0 2024-11-23T13:23:47,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742422_1598 (size=14794) 2024-11-23T13:23:47,738 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:47,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39316 deadline: 1732368287733, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:47,739 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:47,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39372 deadline: 1732368287734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:47,740 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:47,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39352 deadline: 1732368287736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:47,743 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:47,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39342 deadline: 1732368287738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:47,841 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:47,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39372 deadline: 1732368287839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:47,842 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:47,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39316 deadline: 1732368287839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:47,843 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:47,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39352 deadline: 1732368287840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:47,846 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:47,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39342 deadline: 1732368287843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:48,045 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:48,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39316 deadline: 1732368288043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:48,045 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:48,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39372 deadline: 1732368288043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:48,047 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:48,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39352 deadline: 1732368288045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:48,049 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:48,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39342 deadline: 1732368288046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:48,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-23T13:23:48,132 INFO [Thread-2489 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 173 completed 2024-11-23T13:23:48,134 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T13:23:48,134 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:48,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=175, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees 2024-11-23T13:23:48,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-23T13:23:48,135 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=175, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T13:23:48,135 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=175, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T13:23:48,135 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=176, ppid=175, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T13:23:48,137 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123c5f14dea7c314acb93f088c35a7fc448_3fb74ed11ce3b976f22e2e146ef6eea6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123c5f14dea7c314acb93f088c35a7fc448_3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:48,138 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store 
file: hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/28627573b9294956bda0d4c889517a17, store: [table=TestAcidGuarantees family=A region=3fb74ed11ce3b976f22e2e146ef6eea6] 2024-11-23T13:23:48,139 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/28627573b9294956bda0d4c889517a17 is 175, key is test_row_0/A:col10/1732368227721/Put/seqid=0 2024-11-23T13:23:48,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742423_1599 (size=39749) 2024-11-23T13:23:48,142 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=187, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/28627573b9294956bda0d4c889517a17 2024-11-23T13:23:48,148 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/B/698b7518c0444d73b6987b71e0715f34 is 50, key is test_row_0/B:col10/1732368227721/Put/seqid=0 2024-11-23T13:23:48,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742424_1600 (size=12151) 2024-11-23T13:23:48,159 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=187 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/B/698b7518c0444d73b6987b71e0715f34 2024-11-23T13:23:48,167 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/C/f507ceb8773f4195a0e800b966ad9b2b is 50, key is test_row_0/C:col10/1732368227721/Put/seqid=0 2024-11-23T13:23:48,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742425_1601 (size=12151) 2024-11-23T13:23:48,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-23T13:23:48,287 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:48,287 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-11-23T13:23:48,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 
2024-11-23T13:23:48,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. as already flushing 2024-11-23T13:23:48,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 2024-11-23T13:23:48,287 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:48,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:23:48,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:48,350 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:48,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39372 deadline: 1732368288346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:48,350 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:48,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39316 deadline: 1732368288348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:48,350 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:48,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39352 deadline: 1732368288349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:48,354 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:48,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39342 deadline: 1732368288352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:48,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-23T13:23:48,439 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:48,440 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-11-23T13:23:48,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 2024-11-23T13:23:48,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. as already flushing 2024-11-23T13:23:48,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 2024-11-23T13:23:48,440 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:23:48,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:48,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:48,571 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=187 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/C/f507ceb8773f4195a0e800b966ad9b2b 2024-11-23T13:23:48,575 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/28627573b9294956bda0d4c889517a17 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/28627573b9294956bda0d4c889517a17 2024-11-23T13:23:48,578 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/28627573b9294956bda0d4c889517a17, entries=200, sequenceid=187, filesize=38.8 K 2024-11-23T13:23:48,579 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/B/698b7518c0444d73b6987b71e0715f34 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/698b7518c0444d73b6987b71e0715f34 2024-11-23T13:23:48,582 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/698b7518c0444d73b6987b71e0715f34, entries=150, sequenceid=187, filesize=11.9 K 2024-11-23T13:23:48,582 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/C/f507ceb8773f4195a0e800b966ad9b2b as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/f507ceb8773f4195a0e800b966ad9b2b 2024-11-23T13:23:48,585 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/f507ceb8773f4195a0e800b966ad9b2b, entries=150, sequenceid=187, filesize=11.9 K 2024-11-23T13:23:48,586 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 3fb74ed11ce3b976f22e2e146ef6eea6 in 864ms, sequenceid=187, compaction requested=true 2024-11-23T13:23:48,586 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3fb74ed11ce3b976f22e2e146ef6eea6: 2024-11-23T13:23:48,586 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
3fb74ed11ce3b976f22e2e146ef6eea6:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T13:23:48,586 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:23:48,586 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3fb74ed11ce3b976f22e2e146ef6eea6:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T13:23:48,586 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:23:48,586 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 10 store files, 0 compacting, 10 eligible, 16 blocking 2024-11-23T13:23:48,586 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3fb74ed11ce3b976f22e2e146ef6eea6:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T13:23:48,586 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 10 store files, 0 compacting, 10 eligible, 16 blocking 2024-11-23T13:23:48,586 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:23:48,588 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 10 files of size 318644 starting at candidate #0 after considering 36 permutations with 36 in ratio 2024-11-23T13:23:48,588 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 10 files of size 120460 starting at candidate #0 after considering 36 permutations with 36 in ratio 2024-11-23T13:23:48,588 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): 3fb74ed11ce3b976f22e2e146ef6eea6/A is initiating minor compaction (all files) 2024-11-23T13:23:48,588 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 3fb74ed11ce3b976f22e2e146ef6eea6/B is initiating minor compaction (all files) 2024-11-23T13:23:48,588 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3fb74ed11ce3b976f22e2e146ef6eea6/A in TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 2024-11-23T13:23:48,588 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3fb74ed11ce3b976f22e2e146ef6eea6/B in TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 
2024-11-23T13:23:48,589 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/bfe50b0ee6a1476faed25a9ecb843ae3, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/011d29a5d925475f97d42ee19f072e85, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/db413775ab134f7abd383a01c5fc0061, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/4bb8871e3a284085ba0526e980a42c84, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/23da3d253bce40f3ab741a172550195b, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/9d93ddadc0f14768aaaf54a30885e219, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/7c33c97fe37d4491a5e3849a3b28d64c, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/82d375a02c6a4edb851a4cad96eee64f, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/e4188367c4b149f8b2aa5767d01c0c40, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/28627573b9294956bda0d4c889517a17] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp, totalSize=311.2 K 2024-11-23T13:23:48,589 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/bcfa7ff5f6c048b5890acb0e3adf047b, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/2ddce37102f5499388140625ae8d165f, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/0caebd9a24c9413494651d9973323e2d, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/a8fed4e12d13411b894ed8a2b6ad2b79, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/94e03dfa200e473987f11313d7f906e2, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/595677f3503d4cb5833d559d94ba8d4b, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/8b118e068dff42fabe0a6d5fdf164580, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/a2c942c500324e9b835936c2b4223c35, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/78a44d4a8e3040fd88cf297b80e19989, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/698b7518c0444d73b6987b71e0715f34] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp, totalSize=117.6 K 2024-11-23T13:23:48,589 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=6 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 2024-11-23T13:23:48,589 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. files: [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/bfe50b0ee6a1476faed25a9ecb843ae3, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/011d29a5d925475f97d42ee19f072e85, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/db413775ab134f7abd383a01c5fc0061, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/4bb8871e3a284085ba0526e980a42c84, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/23da3d253bce40f3ab741a172550195b, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/9d93ddadc0f14768aaaf54a30885e219, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/7c33c97fe37d4491a5e3849a3b28d64c, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/82d375a02c6a4edb851a4cad96eee64f, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/e4188367c4b149f8b2aa5767d01c0c40, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/28627573b9294956bda0d4c889517a17] 2024-11-23T13:23:48,589 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting bcfa7ff5f6c048b5890acb0e3adf047b, keycount=150, 
bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=18, earliestPutTs=1732368213301 2024-11-23T13:23:48,589 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting bfe50b0ee6a1476faed25a9ecb843ae3, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=18, earliestPutTs=1732368213301 2024-11-23T13:23:48,589 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 2ddce37102f5499388140625ae8d165f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732368213309 2024-11-23T13:23:48,590 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 011d29a5d925475f97d42ee19f072e85, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732368213309 2024-11-23T13:23:48,590 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 0caebd9a24c9413494651d9973323e2d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732368214433 2024-11-23T13:23:48,590 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting db413775ab134f7abd383a01c5fc0061, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732368214433 2024-11-23T13:23:48,590 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting a8fed4e12d13411b894ed8a2b6ad2b79, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732368215584 2024-11-23T13:23:48,590 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4bb8871e3a284085ba0526e980a42c84, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732368215584 2024-11-23T13:23:48,590 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 94e03dfa200e473987f11313d7f906e2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732368216706 2024-11-23T13:23:48,591 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 23da3d253bce40f3ab741a172550195b, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732368216706 2024-11-23T13:23:48,591 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 595677f3503d4cb5833d559d94ba8d4b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1732368218843 2024-11-23T13:23:48,591 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9d93ddadc0f14768aaaf54a30885e219, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1732368218843 2024-11-23T13:23:48,591 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 8b118e068dff42fabe0a6d5fdf164580, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1732368220978 2024-11-23T13:23:48,591 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7c33c97fe37d4491a5e3849a3b28d64c, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1732368220978 
2024-11-23T13:23:48,591 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting a2c942c500324e9b835936c2b4223c35, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=150, earliestPutTs=1732368223123 2024-11-23T13:23:48,592 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 82d375a02c6a4edb851a4cad96eee64f, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=150, earliestPutTs=1732368223123 2024-11-23T13:23:48,592 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 78a44d4a8e3040fd88cf297b80e19989, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=164, earliestPutTs=1732368225253 2024-11-23T13:23:48,592 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting e4188367c4b149f8b2aa5767d01c0c40, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=164, earliestPutTs=1732368225253 2024-11-23T13:23:48,592 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:48,592 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 28627573b9294956bda0d4c889517a17, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=187, earliestPutTs=1732368227400 2024-11-23T13:23:48,592 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 698b7518c0444d73b6987b71e0715f34, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=187, earliestPutTs=1732368227400 2024-11-23T13:23:48,592 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-11-23T13:23:48,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 
2024-11-23T13:23:48,592 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2837): Flushing 3fb74ed11ce3b976f22e2e146ef6eea6 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-23T13:23:48,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3fb74ed11ce3b976f22e2e146ef6eea6, store=A 2024-11-23T13:23:48,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:48,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3fb74ed11ce3b976f22e2e146ef6eea6, store=B 2024-11-23T13:23:48,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:48,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3fb74ed11ce3b976f22e2e146ef6eea6, store=C 2024-11-23T13:23:48,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:48,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112326badd594aa84ef6a7f278dac81c86d8_3fb74ed11ce3b976f22e2e146ef6eea6 is 50, key is test_row_0/A:col10/1732368227733/Put/seqid=0 2024-11-23T13:23:48,617 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3fb74ed11ce3b976f22e2e146ef6eea6#B#compaction#513 average throughput is 1.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:23:48,617 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/B/af5190d9e4844773826d96f869492da0 is 50, key is test_row_0/B:col10/1732368227721/Put/seqid=0 2024-11-23T13:23:48,619 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=3fb74ed11ce3b976f22e2e146ef6eea6] 2024-11-23T13:23:48,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742426_1602 (size=12304) 2024-11-23T13:23:48,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:48,627 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112326badd594aa84ef6a7f278dac81c86d8_3fb74ed11ce3b976f22e2e146ef6eea6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112326badd594aa84ef6a7f278dac81c86d8_3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:48,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/76c6784fd9a74e45abaeb3c384765ba0, store: [table=TestAcidGuarantees family=A region=3fb74ed11ce3b976f22e2e146ef6eea6] 2024-11-23T13:23:48,628 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241123989dfc781819439e8367808a9f24f16b_3fb74ed11ce3b976f22e2e146ef6eea6 store=[table=TestAcidGuarantees family=A region=3fb74ed11ce3b976f22e2e146ef6eea6] 2024-11-23T13:23:48,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/76c6784fd9a74e45abaeb3c384765ba0 is 175, key is test_row_0/A:col10/1732368227733/Put/seqid=0 2024-11-23T13:23:48,634 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241123989dfc781819439e8367808a9f24f16b_3fb74ed11ce3b976f22e2e146ef6eea6, store=[table=TestAcidGuarantees family=A region=3fb74ed11ce3b976f22e2e146ef6eea6] 2024-11-23T13:23:48,634 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123989dfc781819439e8367808a9f24f16b_3fb74ed11ce3b976f22e2e146ef6eea6 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=3fb74ed11ce3b976f22e2e146ef6eea6] 2024-11-23T13:23:48,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742427_1603 (size=12493) 2024-11-23T13:23:48,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742428_1604 (size=31105) 2024-11-23T13:23:48,645 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/B/af5190d9e4844773826d96f869492da0 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/af5190d9e4844773826d96f869492da0 2024-11-23T13:23:48,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742429_1605 (size=4469) 2024-11-23T13:23:48,649 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 10 (all) file(s) in 3fb74ed11ce3b976f22e2e146ef6eea6/B of 3fb74ed11ce3b976f22e2e146ef6eea6 into af5190d9e4844773826d96f869492da0(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:23:48,649 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3fb74ed11ce3b976f22e2e146ef6eea6: 2024-11-23T13:23:48,649 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6., storeName=3fb74ed11ce3b976f22e2e146ef6eea6/B, priority=6, startTime=1732368228586; duration=0sec 2024-11-23T13:23:48,649 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:23:48,649 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3fb74ed11ce3b976f22e2e146ef6eea6:B 2024-11-23T13:23:48,650 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 10 store files, 0 compacting, 10 eligible, 16 blocking 2024-11-23T13:23:48,652 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 10 files of size 120460 starting at candidate #0 after considering 36 permutations with 36 in ratio 2024-11-23T13:23:48,652 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 3fb74ed11ce3b976f22e2e146ef6eea6/C is initiating minor compaction (all files) 2024-11-23T13:23:48,652 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3fb74ed11ce3b976f22e2e146ef6eea6/C in TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 
2024-11-23T13:23:48,652 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/825933bcdecd461d87664a17a0fa7d77, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/c0464ac6dba54ffc948d0a4ebb63a298, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/432f977297ed462eb3e8742353a7671a, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/822762eca48448b2b8fa4d488bc511b8, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/3d8f2d06580a41c69b46cab7e8904695, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/94c7a59b011d4e31829643088c32e71f, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/e47bb9e303e14a629af700a0108460dd, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/8a5cb15b23ec4a1b8c61249554903891, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/87cc6a8c51c1482ab71f3af3498bd932, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/f507ceb8773f4195a0e800b966ad9b2b] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp, totalSize=117.6 K 2024-11-23T13:23:48,652 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 825933bcdecd461d87664a17a0fa7d77, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=18, earliestPutTs=1732368213301 2024-11-23T13:23:48,652 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting c0464ac6dba54ffc948d0a4ebb63a298, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732368213309 2024-11-23T13:23:48,653 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 432f977297ed462eb3e8742353a7671a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732368214433 2024-11-23T13:23:48,653 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 822762eca48448b2b8fa4d488bc511b8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732368215584 2024-11-23T13:23:48,653 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 3d8f2d06580a41c69b46cab7e8904695, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732368216706 2024-11-23T13:23:48,654 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 94c7a59b011d4e31829643088c32e71f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1732368218843 2024-11-23T13:23:48,654 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting e47bb9e303e14a629af700a0108460dd, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=128, earliestPutTs=1732368220978 2024-11-23T13:23:48,654 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 8a5cb15b23ec4a1b8c61249554903891, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=150, earliestPutTs=1732368223123 2024-11-23T13:23:48,654 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 87cc6a8c51c1482ab71f3af3498bd932, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=164, earliestPutTs=1732368225253 2024-11-23T13:23:48,654 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting f507ceb8773f4195a0e800b966ad9b2b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=187, earliestPutTs=1732368227400 2024-11-23T13:23:48,666 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3fb74ed11ce3b976f22e2e146ef6eea6#C#compaction#515 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:23:48,667 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/C/ad827c3aa6d84c0f8240f93c7df4410e is 50, key is test_row_0/C:col10/1732368227721/Put/seqid=0 2024-11-23T13:23:48,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742430_1606 (size=12493) 2024-11-23T13:23:48,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-23T13:23:48,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:48,854 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. as already flushing 2024-11-23T13:23:48,879 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:48,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39316 deadline: 1732368288875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:48,879 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:48,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39342 deadline: 1732368288876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:48,879 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:48,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39352 deadline: 1732368288876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:48,883 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:48,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39372 deadline: 1732368288879, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:48,982 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:48,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39316 deadline: 1732368288980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:48,982 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:48,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39342 deadline: 1732368288980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:48,983 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:48,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39352 deadline: 1732368288980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:48,987 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:48,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39372 deadline: 1732368288984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:49,042 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=200, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/76c6784fd9a74e45abaeb3c384765ba0 2024-11-23T13:23:49,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/B/b0104cdb39ab495886316f46c7525ccb is 50, key is test_row_0/B:col10/1732368227733/Put/seqid=0 2024-11-23T13:23:49,050 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3fb74ed11ce3b976f22e2e146ef6eea6#A#compaction#514 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:23:49,051 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/9133089381bb4a9b99494341f2f157fd is 175, key is test_row_0/A:col10/1732368227721/Put/seqid=0 2024-11-23T13:23:49,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742431_1607 (size=12151) 2024-11-23T13:23:49,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742432_1608 (size=31447) 2024-11-23T13:23:49,062 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/9133089381bb4a9b99494341f2f157fd as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/9133089381bb4a9b99494341f2f157fd 2024-11-23T13:23:49,066 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 10 (all) file(s) in 3fb74ed11ce3b976f22e2e146ef6eea6/A of 3fb74ed11ce3b976f22e2e146ef6eea6 into 9133089381bb4a9b99494341f2f157fd(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:23:49,066 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3fb74ed11ce3b976f22e2e146ef6eea6: 2024-11-23T13:23:49,066 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6., storeName=3fb74ed11ce3b976f22e2e146ef6eea6/A, priority=6, startTime=1732368228586; duration=0sec 2024-11-23T13:23:49,066 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:23:49,066 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3fb74ed11ce3b976f22e2e146ef6eea6:A 2024-11-23T13:23:49,074 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/C/ad827c3aa6d84c0f8240f93c7df4410e as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/ad827c3aa6d84c0f8240f93c7df4410e 2024-11-23T13:23:49,077 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 10 (all) file(s) in 3fb74ed11ce3b976f22e2e146ef6eea6/C of 3fb74ed11ce3b976f22e2e146ef6eea6 into ad827c3aa6d84c0f8240f93c7df4410e(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-23T13:23:49,077 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3fb74ed11ce3b976f22e2e146ef6eea6: 2024-11-23T13:23:49,077 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6., storeName=3fb74ed11ce3b976f22e2e146ef6eea6/C, priority=6, startTime=1732368228586; duration=0sec 2024-11-23T13:23:49,077 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:23:49,077 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3fb74ed11ce3b976f22e2e146ef6eea6:C 2024-11-23T13:23:49,184 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:49,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39316 deadline: 1732368289183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:49,186 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:49,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39352 deadline: 1732368289184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:49,186 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:49,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39342 deadline: 1732368289185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:49,189 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:49,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39372 deadline: 1732368289188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:49,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-23T13:23:49,454 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=200 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/B/b0104cdb39ab495886316f46c7525ccb 2024-11-23T13:23:49,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/C/19b51d91054946e694c4c83a3fce6141 is 50, key is test_row_0/C:col10/1732368227733/Put/seqid=0 2024-11-23T13:23:49,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742433_1609 (size=12151) 2024-11-23T13:23:49,486 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:49,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39316 deadline: 1732368289485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:49,488 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:49,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39352 deadline: 1732368289487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:49,491 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:49,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39342 deadline: 1732368289489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:49,492 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:49,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39372 deadline: 1732368289491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:49,869 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=200 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/C/19b51d91054946e694c4c83a3fce6141 2024-11-23T13:23:49,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/76c6784fd9a74e45abaeb3c384765ba0 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/76c6784fd9a74e45abaeb3c384765ba0 2024-11-23T13:23:49,877 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/76c6784fd9a74e45abaeb3c384765ba0, entries=150, sequenceid=200, filesize=30.4 K 2024-11-23T13:23:49,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/B/b0104cdb39ab495886316f46c7525ccb as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/b0104cdb39ab495886316f46c7525ccb 2024-11-23T13:23:49,880 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/b0104cdb39ab495886316f46c7525ccb, entries=150, sequenceid=200, filesize=11.9 K 2024-11-23T13:23:49,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/C/19b51d91054946e694c4c83a3fce6141 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/19b51d91054946e694c4c83a3fce6141 2024-11-23T13:23:49,884 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/19b51d91054946e694c4c83a3fce6141, entries=150, sequenceid=200, filesize=11.9 K 2024-11-23T13:23:49,885 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 3fb74ed11ce3b976f22e2e146ef6eea6 in 1293ms, sequenceid=200, compaction requested=false 2024-11-23T13:23:49,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2538): Flush status journal for 3fb74ed11ce3b976f22e2e146ef6eea6: 2024-11-23T13:23:49,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 2024-11-23T13:23:49,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=176 2024-11-23T13:23:49,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4106): Remote procedure done, pid=176 2024-11-23T13:23:49,887 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=176, resume processing ppid=175 2024-11-23T13:23:49,887 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=176, ppid=175, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7510 sec 2024-11-23T13:23:49,888 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=175, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees in 1.7530 sec 2024-11-23T13:23:49,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:49,992 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3fb74ed11ce3b976f22e2e146ef6eea6 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-23T13:23:49,992 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3fb74ed11ce3b976f22e2e146ef6eea6, store=A 2024-11-23T13:23:49,992 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:49,993 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3fb74ed11ce3b976f22e2e146ef6eea6, store=B 2024-11-23T13:23:49,993 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:49,993 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
3fb74ed11ce3b976f22e2e146ef6eea6, store=C 2024-11-23T13:23:49,993 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:49,999 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123fc34a2ee4d7042d7847a7f01c715217a_3fb74ed11ce3b976f22e2e146ef6eea6 is 50, key is test_row_0/A:col10/1732368228875/Put/seqid=0 2024-11-23T13:23:50,002 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:50,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39342 deadline: 1732368289999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:50,002 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:50,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39316 deadline: 1732368290000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:50,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742434_1610 (size=14794) 2024-11-23T13:23:50,003 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:50,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39372 deadline: 1732368290001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:50,004 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:50,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39352 deadline: 1732368290002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:50,103 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:50,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39342 deadline: 1732368290103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:50,105 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:50,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39316 deadline: 1732368290103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:50,106 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:50,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39372 deadline: 1732368290104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:50,106 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:50,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39352 deadline: 1732368290105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:50,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-23T13:23:50,239 INFO [Thread-2489 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 175 completed 2024-11-23T13:23:50,240 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-23T13:23:50,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=177, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees 2024-11-23T13:23:50,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-23T13:23:50,241 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=177, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-23T13:23:50,242 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=177, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-23T13:23:50,242 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=178, ppid=177, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-23T13:23:50,305 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:50,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39342 deadline: 1732368290304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:50,308 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:50,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39316 deadline: 1732368290307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:50,308 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:50,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39352 deadline: 1732368290307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:50,309 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:50,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39372 deadline: 1732368290308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:50,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-23T13:23:50,393 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:50,394 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-23T13:23:50,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 2024-11-23T13:23:50,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. as already flushing 2024-11-23T13:23:50,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 2024-11-23T13:23:50,394 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:23:50,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:50,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:50,403 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:50,406 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123fc34a2ee4d7042d7847a7f01c715217a_3fb74ed11ce3b976f22e2e146ef6eea6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123fc34a2ee4d7042d7847a7f01c715217a_3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:50,407 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/60daec6079134ffc8699958b349e40cb, store: [table=TestAcidGuarantees family=A region=3fb74ed11ce3b976f22e2e146ef6eea6] 2024-11-23T13:23:50,408 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/60daec6079134ffc8699958b349e40cb is 175, key is test_row_0/A:col10/1732368228875/Put/seqid=0 2024-11-23T13:23:50,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742435_1611 (size=39749) 2024-11-23T13:23:50,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-23T13:23:50,546 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:50,546 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-23T13:23:50,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 2024-11-23T13:23:50,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. as already flushing 2024-11-23T13:23:50,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 
2024-11-23T13:23:50,546 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:50,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:50,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:50,608 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:50,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39342 deadline: 1732368290607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:50,610 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:50,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39316 deadline: 1732368290609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:50,611 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:50,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39352 deadline: 1732368290610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:50,612 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:50,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39372 deadline: 1732368290611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:50,698 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:50,699 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-23T13:23:50,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 2024-11-23T13:23:50,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. as already flushing 2024-11-23T13:23:50,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 2024-11-23T13:23:50,699 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:23:50,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:50,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:50,813 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=227, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/60daec6079134ffc8699958b349e40cb 2024-11-23T13:23:50,819 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/B/3593b74a996b49ea97dba4bdd25da112 is 50, key is test_row_0/B:col10/1732368228875/Put/seqid=0 2024-11-23T13:23:50,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742436_1612 (size=12151) 2024-11-23T13:23:50,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-23T13:23:50,850 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:50,850 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-23T13:23:50,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 2024-11-23T13:23:50,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. as already flushing 2024-11-23T13:23:50,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 2024-11-23T13:23:50,850 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:50,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:50,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:51,002 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:51,003 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-23T13:23:51,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 2024-11-23T13:23:51,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. as already flushing 2024-11-23T13:23:51,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 2024-11-23T13:23:51,003 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:51,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:51,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:51,111 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:51,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39342 deadline: 1732368291109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:51,113 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:51,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39316 deadline: 1732368291111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:51,113 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:51,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39352 deadline: 1732368291112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:51,115 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:51,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39372 deadline: 1732368291114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:51,155 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:51,156 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-23T13:23:51,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 2024-11-23T13:23:51,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. as already flushing 2024-11-23T13:23:51,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 2024-11-23T13:23:51,156 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:23:51,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:51,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:51,223 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=227 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/B/3593b74a996b49ea97dba4bdd25da112 2024-11-23T13:23:51,229 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/C/f153a1cfb03d41f7bc5f510c3a85f17f is 50, key is test_row_0/C:col10/1732368228875/Put/seqid=0 2024-11-23T13:23:51,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742437_1613 (size=12151) 2024-11-23T13:23:51,308 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:51,308 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-23T13:23:51,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 2024-11-23T13:23:51,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. as already flushing 2024-11-23T13:23:51,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 2024-11-23T13:23:51,309 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:23:51,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:51,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:51,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-23T13:23:51,461 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:51,461 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-23T13:23:51,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 2024-11-23T13:23:51,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. as already flushing 2024-11-23T13:23:51,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 2024-11-23T13:23:51,461 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:51,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:51,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:23:51,613 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:51,613 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-23T13:23:51,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 2024-11-23T13:23:51,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. as already flushing 2024-11-23T13:23:51,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 2024-11-23T13:23:51,614 ERROR [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T13:23:51,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:23:51,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T13:23:51,633 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=227 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/C/f153a1cfb03d41f7bc5f510c3a85f17f 2024-11-23T13:23:51,637 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/60daec6079134ffc8699958b349e40cb as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/60daec6079134ffc8699958b349e40cb 2024-11-23T13:23:51,639 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/60daec6079134ffc8699958b349e40cb, entries=200, sequenceid=227, filesize=38.8 K 2024-11-23T13:23:51,640 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/B/3593b74a996b49ea97dba4bdd25da112 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/3593b74a996b49ea97dba4bdd25da112 2024-11-23T13:23:51,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,643 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/3593b74a996b49ea97dba4bdd25da112, entries=150, sequenceid=227, filesize=11.9 K 2024-11-23T13:23:51,644 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/C/f153a1cfb03d41f7bc5f510c3a85f17f as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/f153a1cfb03d41f7bc5f510c3a85f17f 2024-11-23T13:23:51,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,647 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/f153a1cfb03d41f7bc5f510c3a85f17f, entries=150, sequenceid=227, filesize=11.9 K 2024-11-23T13:23:51,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,648 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 3fb74ed11ce3b976f22e2e146ef6eea6 in 1656ms, sequenceid=227, compaction requested=true 2024-11-23T13:23:51,648 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3fb74ed11ce3b976f22e2e146ef6eea6: 2024-11-23T13:23:51,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,648 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
3fb74ed11ce3b976f22e2e146ef6eea6:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T13:23:51,648 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:23:51,648 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:23:51,648 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3fb74ed11ce3b976f22e2e146ef6eea6:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T13:23:51,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,648 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:23:51,648 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3fb74ed11ce3b976f22e2e146ef6eea6:C, priority=-2147483648, current under compaction store size is 3 2024-11-23T13:23:51,648 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:23:51,648 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:23:51,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,649 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102301 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:23:51,649 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1540): 3fb74ed11ce3b976f22e2e146ef6eea6/A is initiating minor compaction (all files) 2024-11-23T13:23:51,649 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3fb74ed11ce3b976f22e2e146ef6eea6/A in TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 
2024-11-23T13:23:51,649 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/9133089381bb4a9b99494341f2f157fd, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/76c6784fd9a74e45abaeb3c384765ba0, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/60daec6079134ffc8699958b349e40cb] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp, totalSize=99.9 K 2024-11-23T13:23:51,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,649 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 2024-11-23T13:23:51,649 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. files: [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/9133089381bb4a9b99494341f2f157fd, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/76c6784fd9a74e45abaeb3c384765ba0, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/60daec6079134ffc8699958b349e40cb] 2024-11-23T13:23:51,649 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:23:51,649 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 3fb74ed11ce3b976f22e2e146ef6eea6/B is initiating minor compaction (all files) 2024-11-23T13:23:51,649 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3fb74ed11ce3b976f22e2e146ef6eea6/B in TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 
2024-11-23T13:23:51,650 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/af5190d9e4844773826d96f869492da0, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/b0104cdb39ab495886316f46c7525ccb, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/3593b74a996b49ea97dba4bdd25da112] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp, totalSize=35.9 K 2024-11-23T13:23:51,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,650 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9133089381bb4a9b99494341f2f157fd, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=187, earliestPutTs=1732368227400 2024-11-23T13:23:51,650 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting af5190d9e4844773826d96f869492da0, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=187, earliestPutTs=1732368227400 2024-11-23T13:23:51,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,650 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 76c6784fd9a74e45abaeb3c384765ba0, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1732368227733 2024-11-23T13:23:51,650 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting b0104cdb39ab495886316f46c7525ccb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1732368227733 2024-11-23T13:23:51,650 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] compactions.Compactor(224): Compacting 60daec6079134ffc8699958b349e40cb, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=227, earliestPutTs=1732368228874 2024-11-23T13:23:51,650 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 3593b74a996b49ea97dba4bdd25da112, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=227, earliestPutTs=1732368228875 2024-11-23T13:23:51,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 
{}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,657 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3fb74ed11ce3b976f22e2e146ef6eea6#B#compaction#521 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:23:51,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,657 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/B/e40de72cd0db449ba1d09caddf8d7c7e is 50, key is test_row_0/B:col10/1732368228875/Put/seqid=0 2024-11-23T13:23:51,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,660 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,660 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=3fb74ed11ce3b976f22e2e146ef6eea6] 2024-11-23T13:23:51,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742438_1614 (size=12595) 2024-11-23T13:23:51,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,662 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241123cd9d909525124866a28184ac9fc65bd7_3fb74ed11ce3b976f22e2e146ef6eea6 store=[table=TestAcidGuarantees family=A region=3fb74ed11ce3b976f22e2e146ef6eea6] 2024-11-23T13:23:51,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,664 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241123cd9d909525124866a28184ac9fc65bd7_3fb74ed11ce3b976f22e2e146ef6eea6, store=[table=TestAcidGuarantees family=A region=3fb74ed11ce3b976f22e2e146ef6eea6] 2024-11-23T13:23:51,664 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123cd9d909525124866a28184ac9fc65bd7_3fb74ed11ce3b976f22e2e146ef6eea6 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=3fb74ed11ce3b976f22e2e146ef6eea6] 2024-11-23T13:23:51,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,666 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/B/e40de72cd0db449ba1d09caddf8d7c7e as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/e40de72cd0db449ba1d09caddf8d7c7e 2024-11-23T13:23:51,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742439_1615 (size=4469) 2024-11-23T13:23:51,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,670 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3fb74ed11ce3b976f22e2e146ef6eea6#A#compaction#522 average throughput is 2.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:23:51,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,670 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/9e3a3450729a45b1aaa6ecd456a6950d is 175, key is test_row_0/A:col10/1732368228875/Put/seqid=0 2024-11-23T13:23:51,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,671 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3fb74ed11ce3b976f22e2e146ef6eea6/B of 3fb74ed11ce3b976f22e2e146ef6eea6 into e40de72cd0db449ba1d09caddf8d7c7e(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:23:51,671 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3fb74ed11ce3b976f22e2e146ef6eea6: 2024-11-23T13:23:51,671 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6., storeName=3fb74ed11ce3b976f22e2e146ef6eea6/B, priority=13, startTime=1732368231648; duration=0sec 2024-11-23T13:23:51,671 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-23T13:23:51,671 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3fb74ed11ce3b976f22e2e146ef6eea6:B 2024-11-23T13:23:51,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,671 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-23T13:23:51,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,672 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-23T13:23:51,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,672 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1540): 
3fb74ed11ce3b976f22e2e146ef6eea6/C is initiating minor compaction (all files) 2024-11-23T13:23:51,672 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3fb74ed11ce3b976f22e2e146ef6eea6/C in TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 2024-11-23T13:23:51,672 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/ad827c3aa6d84c0f8240f93c7df4410e, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/19b51d91054946e694c4c83a3fce6141, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/f153a1cfb03d41f7bc5f510c3a85f17f] into tmpdir=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp, totalSize=35.9 K 2024-11-23T13:23:51,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,673 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting ad827c3aa6d84c0f8240f93c7df4410e, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=187, earliestPutTs=1732368227400 2024-11-23T13:23:51,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,673 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting 19b51d91054946e694c4c83a3fce6141, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1732368227733 2024-11-23T13:23:51,673 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] compactions.Compactor(224): Compacting f153a1cfb03d41f7bc5f510c3a85f17f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=227, earliestPutTs=1732368228875 2024-11-23T13:23:51,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742440_1616 (size=31549) 2024-11-23T13:23:51,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,678 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/9e3a3450729a45b1aaa6ecd456a6950d as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/9e3a3450729a45b1aaa6ecd456a6950d 2024-11-23T13:23:51,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,680 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3fb74ed11ce3b976f22e2e146ef6eea6#C#compaction#523 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-23T13:23:51,680 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/C/8f75672142a340d49dffdd68fcc075ae is 50, key is test_row_0/C:col10/1732368228875/Put/seqid=0 2024-11-23T13:23:51,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,681 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3fb74ed11ce3b976f22e2e146ef6eea6/A of 3fb74ed11ce3b976f22e2e146ef6eea6 into 9e3a3450729a45b1aaa6ecd456a6950d(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:23:51,681 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3fb74ed11ce3b976f22e2e146ef6eea6: 2024-11-23T13:23:51,681 INFO [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6., storeName=3fb74ed11ce3b976f22e2e146ef6eea6/A, priority=13, startTime=1732368231648; duration=0sec 2024-11-23T13:23:51,682 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:23:51,682 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3fb74ed11ce3b976f22e2e146ef6eea6:A 2024-11-23T13:23:51,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742441_1617 (size=12595) 2024-11-23T13:23:51,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,687 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/C/8f75672142a340d49dffdd68fcc075ae as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/8f75672142a340d49dffdd68fcc075ae 2024-11-23T13:23:51,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker 
impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,691 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3fb74ed11ce3b976f22e2e146ef6eea6/C 
of 3fb74ed11ce3b976f22e2e146ef6eea6 into 8f75672142a340d49dffdd68fcc075ae(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-23T13:23:51,691 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3fb74ed11ce3b976f22e2e146ef6eea6: 2024-11-23T13:23:51,691 INFO [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6., storeName=3fb74ed11ce3b976f22e2e146ef6eea6/C, priority=13, startTime=1732368231648; duration=0sec 2024-11-23T13:23:51,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,691 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:23:51,691 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3fb74ed11ce3b976f22e2e146ef6eea6:C 2024-11-23T13:23:51,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,765 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:51,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,765 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33173 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-23T13:23:51,765 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 2024-11-23T13:23:51,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,765 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2837): Flushing 3fb74ed11ce3b976f22e2e146ef6eea6 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-23T13:23:51,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3fb74ed11ce3b976f22e2e146ef6eea6, store=A 2024-11-23T13:23:51,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:51,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3fb74ed11ce3b976f22e2e146ef6eea6, store=B 2024-11-23T13:23:51,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:51,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3fb74ed11ce3b976f22e2e146ef6eea6, store=C 2024-11-23T13:23:51,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:51,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T13:23:51,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T13:23:51,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123f325f95aed4e437fa7adad17a69850e0_3fb74ed11ce3b976f22e2e146ef6eea6 is 50, key is test_row_0/A:col10/1732368230000/Put/seqid=0 2024-11-23T13:23:51,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,775 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742442_1618 (size=9814) 2024-11-23T13:23:51,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker [... the same DEBUG record ("storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker") is repeated continuously by RpcServer.default.FPBQ.Fifo.handler=0, handler=1 and handler=2 (queue=0, port=33173) from 2024-11-23T13:23:51,875 through 2024-11-23T13:23:51,953; duplicate records elided ...] 2024-11-23T13:23:51,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:51,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... the same DEBUG entry from storefiletracker.StoreFileTrackerFactory(122), "instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker", repeats continuously for RpcServer.default.FPBQ.Fifo handlers 0, 1, and 2 on port 33173 from 2024-11-23T13:23:51,999 through 2024-11-23T13:23:52,079; duplicate entries elided ...]
2024-11-23T13:23:52,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] 
regionserver.HRegion(8581): Flush requested on 3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:52,118 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. as already flushing 2024-11-23T13:23:52,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T13:23:52,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-23T13:23:52,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,168 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:52,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39316 deadline: 1732368292166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:52,168 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:52,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39372 deadline: 1732368292166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:52,168 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:52,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39352 deadline: 1732368292166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:52,168 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:52,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39342 deadline: 1732368292167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:52,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:52,180 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241123f325f95aed4e437fa7adad17a69850e0_3fb74ed11ce3b976f22e2e146ef6eea6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123f325f95aed4e437fa7adad17a69850e0_3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:52,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/13d6ae6c290f49f5ae6fea211867739b, store: [table=TestAcidGuarantees family=A region=3fb74ed11ce3b976f22e2e146ef6eea6] 2024-11-23T13:23:52,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/13d6ae6c290f49f5ae6fea211867739b is 175, key is test_row_0/A:col10/1732368230000/Put/seqid=0 2024-11-23T13:23:52,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742443_1619 (size=22461) 2024-11-23T13:23:52,269 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:52,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39316 deadline: 1732368292269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:52,269 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:52,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39372 deadline: 1732368292269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:52,270 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:52,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39352 deadline: 1732368292269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:52,270 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:52,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39342 deadline: 1732368292269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:52,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-23T13:23:52,472 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:52,472 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:52,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39372 deadline: 1732368292471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:52,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39352 deadline: 1732368292471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:52,472 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:52,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39342 deadline: 1732368292471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:52,472 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:52,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39316 deadline: 1732368292471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:52,589 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=242, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/13d6ae6c290f49f5ae6fea211867739b 2024-11-23T13:23:52,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/B/86c00d6117044db7b18a66fe0740dbe0 is 50, key is test_row_0/B:col10/1732368230000/Put/seqid=0 2024-11-23T13:23:52,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742444_1620 (size=9757) 2024-11-23T13:23:52,775 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:52,775 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:52,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39352 deadline: 1732368292773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:52,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39372 deadline: 1732368292773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:52,775 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:52,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39316 deadline: 1732368292773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:52,776 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:52,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39342 deadline: 1732368292775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:53,000 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=242 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/B/86c00d6117044db7b18a66fe0740dbe0 2024-11-23T13:23:53,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/C/e8feb58dda2640bcb4a84600c2f991ed is 50, key is test_row_0/C:col10/1732368230000/Put/seqid=0 2024-11-23T13:23:53,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742445_1621 (size=9757) 2024-11-23T13:23:53,277 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:53,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39372 deadline: 1732368293276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:53,278 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:53,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39316 deadline: 1732368293277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:53,280 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:53,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39352 deadline: 1732368293279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:53,280 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-23T13:23:53,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39342 deadline: 1732368293280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:53,284 DEBUG [Thread-2490 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5d836f78 to 127.0.0.1:51875 2024-11-23T13:23:53,284 DEBUG [Thread-2490 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:23:53,285 DEBUG [Thread-2498 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x458a85fd to 127.0.0.1:51875 2024-11-23T13:23:53,285 DEBUG [Thread-2498 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:23:53,285 DEBUG [Thread-2496 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x06556601 to 127.0.0.1:51875 2024-11-23T13:23:53,285 DEBUG [Thread-2496 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:23:53,285 DEBUG [Thread-2492 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x53305d9b to 127.0.0.1:51875 2024-11-23T13:23:53,285 DEBUG [Thread-2492 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:23:53,286 DEBUG [Thread-2494 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6bb6288a to 127.0.0.1:51875 2024-11-23T13:23:53,286 DEBUG [Thread-2494 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:23:53,410 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=242 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/C/e8feb58dda2640bcb4a84600c2f991ed 2024-11-23T13:23:53,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/13d6ae6c290f49f5ae6fea211867739b as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/13d6ae6c290f49f5ae6fea211867739b 2024-11-23T13:23:53,416 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/13d6ae6c290f49f5ae6fea211867739b, entries=100, sequenceid=242, filesize=21.9 K 2024-11-23T13:23:53,416 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/B/86c00d6117044db7b18a66fe0740dbe0 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/86c00d6117044db7b18a66fe0740dbe0 2024-11-23T13:23:53,419 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/86c00d6117044db7b18a66fe0740dbe0, entries=100, sequenceid=242, filesize=9.5 K 2024-11-23T13:23:53,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/C/e8feb58dda2640bcb4a84600c2f991ed as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/e8feb58dda2640bcb4a84600c2f991ed 2024-11-23T13:23:53,421 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/e8feb58dda2640bcb4a84600c2f991ed, entries=100, sequenceid=242, filesize=9.5 K 2024-11-23T13:23:53,422 INFO [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 3fb74ed11ce3b976f22e2e146ef6eea6 in 1657ms, sequenceid=242, compaction requested=false 2024-11-23T13:23:53,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2538): Flush status journal for 3fb74ed11ce3b976f22e2e146ef6eea6: 2024-11-23T13:23:53,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 
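The flush that completes here is the other half of the RegionTooBusyException storm above: while the memstore of 3fb74ed11ce3b976f22e2e146ef6eea6 sat over its blocking limit (flush size times hbase.hregion.memstore.block.multiplier, 512.0 K in this run), every Mutate RPC was rejected, and writes are only accepted again once the per-family .tmp files are committed into the A, B and C stores. A hedged client-side sketch of the same interaction, assuming the standard HBase 2.x client API; the retry loop and sleep values are illustrative, not taken from the test:

    import java.io.IOException;

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    // Illustrative writer: retries a Put that the server rejects while the
    // region is over its memstore blocking limit, then asks for a table flush
    // (the client-side counterpart of the FlushTableProcedure pid=177 above).
    public class FlushAndRetrySketch {
      public static void main(String[] args) throws Exception {
        TableName tn = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(tn);
             Admin admin = conn.getAdmin()) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
          for (int attempt = 1; attempt <= 5; attempt++) {
            try {
              table.put(put);
              break; // accepted once a flush has freed memstore space
            } catch (IOException e) {
              // Once client-side retries are exhausted, RegionTooBusyException
              // surfaces here (directly or as the cause); back off and retry.
              Thread.sleep(200L * attempt);
            }
          }
          admin.flush(tn); // explicit flush request for the whole table
        }
      }
    }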
2024-11-23T13:23:53,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ba2e440802a7:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=178 2024-11-23T13:23:53,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster(4106): Remote procedure done, pid=178 2024-11-23T13:23:53,424 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=178, resume processing ppid=177 2024-11-23T13:23:53,424 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=178, ppid=177, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.1810 sec 2024-11-23T13:23:53,425 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=177, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees in 3.1840 sec 2024-11-23T13:23:53,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33173 {}] regionserver.HRegion(8581): Flush requested on 3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:53,767 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3fb74ed11ce3b976f22e2e146ef6eea6 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-23T13:23:53,767 DEBUG [Thread-2483 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x17327621 to 127.0.0.1:51875 2024-11-23T13:23:53,767 DEBUG [Thread-2483 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:23:53,767 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3fb74ed11ce3b976f22e2e146ef6eea6, store=A 2024-11-23T13:23:53,767 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:53,767 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3fb74ed11ce3b976f22e2e146ef6eea6, store=B 2024-11-23T13:23:53,767 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:53,767 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3fb74ed11ce3b976f22e2e146ef6eea6, store=C 2024-11-23T13:23:53,767 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:53,772 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112394cc4b91edbb442a97269a76c8b6c5a2_3fb74ed11ce3b976f22e2e146ef6eea6 is 50, key is test_row_0/A:col10/1732368233766/Put/seqid=0 2024-11-23T13:23:53,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742446_1622 (size=12454) 2024-11-23T13:23:54,176 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:54,179 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112394cc4b91edbb442a97269a76c8b6c5a2_3fb74ed11ce3b976f22e2e146ef6eea6 to 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112394cc4b91edbb442a97269a76c8b6c5a2_3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:54,179 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/b75ffba7ece14b7f95e773e37764c9ee, store: [table=TestAcidGuarantees family=A region=3fb74ed11ce3b976f22e2e146ef6eea6] 2024-11-23T13:23:54,180 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/b75ffba7ece14b7f95e773e37764c9ee is 175, key is test_row_0/A:col10/1732368233766/Put/seqid=0 2024-11-23T13:23:54,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742447_1623 (size=31255) 2024-11-23T13:23:54,280 DEBUG [Thread-2479 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0d5efb7a to 127.0.0.1:51875 2024-11-23T13:23:54,280 DEBUG [Thread-2479 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:23:54,285 DEBUG [Thread-2487 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5b914bf4 to 127.0.0.1:51875 2024-11-23T13:23:54,285 DEBUG [Thread-2487 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:23:54,286 DEBUG [Thread-2481 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7fc332d8 to 127.0.0.1:51875 2024-11-23T13:23:54,286 DEBUG [Thread-2481 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:23:54,289 DEBUG [Thread-2485 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1584f18a to 127.0.0.1:51875 2024-11-23T13:23:54,289 DEBUG [Thread-2485 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:23:54,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-23T13:23:54,345 INFO [Thread-2489 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 177 completed 2024-11-23T13:23:54,346 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers:
2024-11-23T13:23:54,346 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 45
2024-11-23T13:23:54,346 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 49
2024-11-23T13:23:54,346 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 15
2024-11-23T13:23:54,346 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 54
2024-11-23T13:23:54,346 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 56
2024-11-23T13:23:54,346 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers:
2024-11-23T13:23:54,346 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5934
2024-11-23T13:23:54,346 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6028
2024-11-23T13:23:54,346 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5825
2024-11-23T13:23:54,346 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5952
2024-11-23T13:23:54,346 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6029
2024-11-23T13:23:54,346 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners:
2024-11-23T13:23:54,346 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-11-23T13:23:54,346 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6d9954b7 to 127.0.0.1:51875
2024-11-23T13:23:54,346 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-23T13:23:54,346 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees
2024-11-23T13:23:54,347 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees
2024-11-23T13:23:54,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=179, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees
2024-11-23T13:23:54,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179
2024-11-23T13:23:54,349 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732368234349"}]},"ts":"1732368234349"}
2024-11-23T13:23:54,350 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta
2024-11-23T13:23:54,352 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING
2024-11-23T13:23:54,352 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=180, ppid=179, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}]
2024-11-23T13:23:54,353 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3fb74ed11ce3b976f22e2e146ef6eea6, UNASSIGN}]
2024-11-23T13:23:54,353 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3fb74ed11ce3b976f22e2e146ef6eea6, UNASSIGN
2024-11-23T13:23:54,354 INFO [PEWorker-1
{}] assignment.RegionStateStore(202): pid=181 updating hbase:meta row=3fb74ed11ce3b976f22e2e146ef6eea6, regionState=CLOSING, regionLocation=ba2e440802a7,33173,1732368061317 2024-11-23T13:23:54,354 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T13:23:54,355 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=182, ppid=181, state=RUNNABLE; CloseRegionProcedure 3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317}] 2024-11-23T13:23:54,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-23T13:23:54,505 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to ba2e440802a7,33173,1732368061317 2024-11-23T13:23:54,506 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] handler.UnassignRegionHandler(124): Close 3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:54,506 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-23T13:23:54,506 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(1681): Closing 3fb74ed11ce3b976f22e2e146ef6eea6, disabling compactions & flushes 2024-11-23T13:23:54,506 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(1942): waiting for 0 compactions & cache flush to complete for region TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 
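The entries from "Started disable of TestAcidGuarantees" onward show the procedure chain behind a single client call: DisableTableProcedure (pid=179) schedules CloseTableRegionsProcedure (pid=180), which unassigns the region via TransitRegionStateProcedure (pid=181) and finally runs CloseRegionProcedure (pid=182) on the region server. A minimal sketch of the client side of that teardown, assuming the standard Admin API; the deleteTable call is a typical follow-up in test cleanup, not something this log shows:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Client-side view of the disable sequence recorded above.
    public class DisableTableSketch {
      public static void main(String[] args) throws Exception {
        TableName tn = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.disableTable(tn); // blocks until every region of the table is closed
          // admin.deleteTable(tn); // common next step in teardown (not shown in this log)
        }
      }
    }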
2024-11-23T13:23:54,584 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=267, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/b75ffba7ece14b7f95e773e37764c9ee 2024-11-23T13:23:54,589 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/B/4e8fe24eb56142fd98fa109d67ce0f9f is 50, key is test_row_0/B:col10/1732368233766/Put/seqid=0 2024-11-23T13:23:54,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742448_1624 (size=12301) 2024-11-23T13:23:54,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-23T13:23:54,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-23T13:23:54,993 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=267 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/B/4e8fe24eb56142fd98fa109d67ce0f9f 2024-11-23T13:23:54,999 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/C/21226794ef6544b78e5e1bc6bdd09283 is 50, key is test_row_0/C:col10/1732368233766/Put/seqid=0 2024-11-23T13:23:55,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742449_1625 (size=12301) 2024-11-23T13:23:55,403 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=267 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/C/21226794ef6544b78e5e1bc6bdd09283 2024-11-23T13:23:55,406 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/b75ffba7ece14b7f95e773e37764c9ee as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/b75ffba7ece14b7f95e773e37764c9ee 2024-11-23T13:23:55,409 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/b75ffba7ece14b7f95e773e37764c9ee, entries=150, sequenceid=267, filesize=30.5 K 2024-11-23T13:23:55,409 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/B/4e8fe24eb56142fd98fa109d67ce0f9f as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/4e8fe24eb56142fd98fa109d67ce0f9f 2024-11-23T13:23:55,412 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/4e8fe24eb56142fd98fa109d67ce0f9f, entries=150, sequenceid=267, filesize=12.0 K 2024-11-23T13:23:55,412 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/C/21226794ef6544b78e5e1bc6bdd09283 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/21226794ef6544b78e5e1bc6bdd09283 2024-11-23T13:23:55,415 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/21226794ef6544b78e5e1bc6bdd09283, entries=150, sequenceid=267, filesize=12.0 K 2024-11-23T13:23:55,415 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=26.84 KB/27480 for 3fb74ed11ce3b976f22e2e146ef6eea6 in 1648ms, sequenceid=267, compaction requested=true 2024-11-23T13:23:55,416 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3fb74ed11ce3b976f22e2e146ef6eea6: 2024-11-23T13:23:55,416 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 2024-11-23T13:23:55,416 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 2024-11-23T13:23:55,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3fb74ed11ce3b976f22e2e146ef6eea6:A, priority=-2147483648, current under compaction store size is 1 2024-11-23T13:23:55,416 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. after waiting 0 ms 2024-11-23T13:23:55,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:23:55,416 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 2024-11-23T13:23:55,416 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 
because compaction request was cancelled 2024-11-23T13:23:55,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3fb74ed11ce3b976f22e2e146ef6eea6:B, priority=-2147483648, current under compaction store size is 2 2024-11-23T13:23:55,416 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3fb74ed11ce3b976f22e2e146ef6eea6:A 2024-11-23T13:23:55,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:23:55,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3fb74ed11ce3b976f22e2e146ef6eea6:C, priority=-2147483648, current under compaction store size is 2 2024-11-23T13:23:55,416 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. because compaction request was cancelled 2024-11-23T13:23:55,416 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. because compaction request was cancelled 2024-11-23T13:23:55,416 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(2837): Flushing 3fb74ed11ce3b976f22e2e146ef6eea6 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-23T13:23:55,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-23T13:23:55,416 DEBUG [RS:0;ba2e440802a7:33173-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3fb74ed11ce3b976f22e2e146ef6eea6:C 2024-11-23T13:23:55,416 DEBUG [RS:0;ba2e440802a7:33173-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3fb74ed11ce3b976f22e2e146ef6eea6:B 2024-11-23T13:23:55,416 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3fb74ed11ce3b976f22e2e146ef6eea6, store=A 2024-11-23T13:23:55,416 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:55,416 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3fb74ed11ce3b976f22e2e146ef6eea6, store=B 2024-11-23T13:23:55,416 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:55,416 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3fb74ed11ce3b976f22e2e146ef6eea6, store=C 2024-11-23T13:23:55,416 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-23T13:23:55,420 DEBUG 
[RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411239fce40223a4c43ceb7b8d5460eb97f5d_3fb74ed11ce3b976f22e2e146ef6eea6 is 50, key is test_row_0/A:col10/1732368234288/Put/seqid=0 2024-11-23T13:23:55,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742450_1626 (size=9914) 2024-11-23T13:23:55,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-23T13:23:55,823 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T13:23:55,826 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411239fce40223a4c43ceb7b8d5460eb97f5d_3fb74ed11ce3b976f22e2e146ef6eea6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411239fce40223a4c43ceb7b8d5460eb97f5d_3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:55,827 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/0c31419bee7346639019ef376e84f345, store: [table=TestAcidGuarantees family=A region=3fb74ed11ce3b976f22e2e146ef6eea6] 2024-11-23T13:23:55,827 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/0c31419bee7346639019ef376e84f345 is 175, key is test_row_0/A:col10/1732368234288/Put/seqid=0 2024-11-23T13:23:55,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742451_1627 (size=22561) 2024-11-23T13:23:56,231 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=274, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/0c31419bee7346639019ef376e84f345 2024-11-23T13:23:56,235 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/B/b4d1a2bcd6b94cf4a7538c9d321abab7 is 50, key is 
test_row_0/B:col10/1732368234288/Put/seqid=0 2024-11-23T13:23:56,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742452_1628 (size=9857) 2024-11-23T13:23:56,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-23T13:23:56,639 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/B/b4d1a2bcd6b94cf4a7538c9d321abab7 2024-11-23T13:23:56,644 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/C/24db1b15953a4834844ced3532354398 is 50, key is test_row_0/C:col10/1732368234288/Put/seqid=0 2024-11-23T13:23:56,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742453_1629 (size=9857) 2024-11-23T13:23:57,047 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/C/24db1b15953a4834844ced3532354398 2024-11-23T13:23:57,050 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/A/0c31419bee7346639019ef376e84f345 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/0c31419bee7346639019ef376e84f345 2024-11-23T13:23:57,053 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/0c31419bee7346639019ef376e84f345, entries=100, sequenceid=274, filesize=22.0 K 2024-11-23T13:23:57,053 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/B/b4d1a2bcd6b94cf4a7538c9d321abab7 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/b4d1a2bcd6b94cf4a7538c9d321abab7 2024-11-23T13:23:57,056 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/b4d1a2bcd6b94cf4a7538c9d321abab7, entries=100, sequenceid=274, filesize=9.6 K 2024-11-23T13:23:57,056 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/.tmp/C/24db1b15953a4834844ced3532354398 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/24db1b15953a4834844ced3532354398 2024-11-23T13:23:57,059 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/24db1b15953a4834844ced3532354398, entries=100, sequenceid=274, filesize=9.6 K 2024-11-23T13:23:57,059 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 3fb74ed11ce3b976f22e2e146ef6eea6 in 1643ms, sequenceid=274, compaction requested=true 2024-11-23T13:23:57,060 DEBUG [StoreCloser-TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/bfe50b0ee6a1476faed25a9ecb843ae3, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/011d29a5d925475f97d42ee19f072e85, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/db413775ab134f7abd383a01c5fc0061, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/4bb8871e3a284085ba0526e980a42c84, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/23da3d253bce40f3ab741a172550195b, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/9d93ddadc0f14768aaaf54a30885e219, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/7c33c97fe37d4491a5e3849a3b28d64c, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/82d375a02c6a4edb851a4cad96eee64f, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/e4188367c4b149f8b2aa5767d01c0c40, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/28627573b9294956bda0d4c889517a17, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/9133089381bb4a9b99494341f2f157fd, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/76c6784fd9a74e45abaeb3c384765ba0, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/60daec6079134ffc8699958b349e40cb] to archive 2024-11-23T13:23:57,060 DEBUG [StoreCloser-TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-23T13:23:57,062 DEBUG [StoreCloser-TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/bfe50b0ee6a1476faed25a9ecb843ae3 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/bfe50b0ee6a1476faed25a9ecb843ae3 2024-11-23T13:23:57,062 DEBUG [StoreCloser-TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/011d29a5d925475f97d42ee19f072e85 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/011d29a5d925475f97d42ee19f072e85 2024-11-23T13:23:57,063 DEBUG [StoreCloser-TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/db413775ab134f7abd383a01c5fc0061 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/db413775ab134f7abd383a01c5fc0061 2024-11-23T13:23:57,064 DEBUG [StoreCloser-TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/4bb8871e3a284085ba0526e980a42c84 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/4bb8871e3a284085ba0526e980a42c84 2024-11-23T13:23:57,064 DEBUG [StoreCloser-TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/23da3d253bce40f3ab741a172550195b to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/23da3d253bce40f3ab741a172550195b 2024-11-23T13:23:57,065 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/9d93ddadc0f14768aaaf54a30885e219 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/9d93ddadc0f14768aaaf54a30885e219 2024-11-23T13:23:57,066 DEBUG [StoreCloser-TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/7c33c97fe37d4491a5e3849a3b28d64c to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/7c33c97fe37d4491a5e3849a3b28d64c 2024-11-23T13:23:57,067 DEBUG [StoreCloser-TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/82d375a02c6a4edb851a4cad96eee64f to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/82d375a02c6a4edb851a4cad96eee64f 2024-11-23T13:23:57,067 DEBUG [StoreCloser-TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/e4188367c4b149f8b2aa5767d01c0c40 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/e4188367c4b149f8b2aa5767d01c0c40 2024-11-23T13:23:57,068 DEBUG [StoreCloser-TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/28627573b9294956bda0d4c889517a17 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/28627573b9294956bda0d4c889517a17 2024-11-23T13:23:57,069 DEBUG [StoreCloser-TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/9133089381bb4a9b99494341f2f157fd to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/9133089381bb4a9b99494341f2f157fd 2024-11-23T13:23:57,070 DEBUG [StoreCloser-TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/76c6784fd9a74e45abaeb3c384765ba0 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/76c6784fd9a74e45abaeb3c384765ba0 2024-11-23T13:23:57,070 DEBUG [StoreCloser-TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/60daec6079134ffc8699958b349e40cb to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/60daec6079134ffc8699958b349e40cb 2024-11-23T13:23:57,071 DEBUG [StoreCloser-TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/bcfa7ff5f6c048b5890acb0e3adf047b, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/2ddce37102f5499388140625ae8d165f, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/0caebd9a24c9413494651d9973323e2d, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/a8fed4e12d13411b894ed8a2b6ad2b79, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/94e03dfa200e473987f11313d7f906e2, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/595677f3503d4cb5833d559d94ba8d4b, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/8b118e068dff42fabe0a6d5fdf164580, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/a2c942c500324e9b835936c2b4223c35, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/78a44d4a8e3040fd88cf297b80e19989, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/af5190d9e4844773826d96f869492da0, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/698b7518c0444d73b6987b71e0715f34, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/b0104cdb39ab495886316f46c7525ccb, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/3593b74a996b49ea97dba4bdd25da112] to archive 2024-11-23T13:23:57,072 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-23T13:23:57,073 DEBUG [StoreCloser-TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/bcfa7ff5f6c048b5890acb0e3adf047b to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/bcfa7ff5f6c048b5890acb0e3adf047b 2024-11-23T13:23:57,074 DEBUG [StoreCloser-TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/2ddce37102f5499388140625ae8d165f to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/2ddce37102f5499388140625ae8d165f 2024-11-23T13:23:57,074 DEBUG [StoreCloser-TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/0caebd9a24c9413494651d9973323e2d to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/0caebd9a24c9413494651d9973323e2d 2024-11-23T13:23:57,075 DEBUG [StoreCloser-TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/a8fed4e12d13411b894ed8a2b6ad2b79 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/a8fed4e12d13411b894ed8a2b6ad2b79 2024-11-23T13:23:57,076 DEBUG [StoreCloser-TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/94e03dfa200e473987f11313d7f906e2 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/94e03dfa200e473987f11313d7f906e2 2024-11-23T13:23:57,077 DEBUG [StoreCloser-TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/595677f3503d4cb5833d559d94ba8d4b to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/595677f3503d4cb5833d559d94ba8d4b 2024-11-23T13:23:57,077 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/8b118e068dff42fabe0a6d5fdf164580 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/8b118e068dff42fabe0a6d5fdf164580 2024-11-23T13:23:57,078 DEBUG [StoreCloser-TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/a2c942c500324e9b835936c2b4223c35 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/a2c942c500324e9b835936c2b4223c35 2024-11-23T13:23:57,079 DEBUG [StoreCloser-TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/78a44d4a8e3040fd88cf297b80e19989 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/78a44d4a8e3040fd88cf297b80e19989 2024-11-23T13:23:57,080 DEBUG [StoreCloser-TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/af5190d9e4844773826d96f869492da0 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/af5190d9e4844773826d96f869492da0 2024-11-23T13:23:57,081 DEBUG [StoreCloser-TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/698b7518c0444d73b6987b71e0715f34 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/698b7518c0444d73b6987b71e0715f34 2024-11-23T13:23:57,081 DEBUG [StoreCloser-TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/b0104cdb39ab495886316f46c7525ccb to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/b0104cdb39ab495886316f46c7525ccb 2024-11-23T13:23:57,082 DEBUG [StoreCloser-TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/3593b74a996b49ea97dba4bdd25da112 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/3593b74a996b49ea97dba4bdd25da112 2024-11-23T13:23:57,083 DEBUG [StoreCloser-TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/825933bcdecd461d87664a17a0fa7d77, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/c0464ac6dba54ffc948d0a4ebb63a298, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/432f977297ed462eb3e8742353a7671a, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/822762eca48448b2b8fa4d488bc511b8, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/3d8f2d06580a41c69b46cab7e8904695, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/94c7a59b011d4e31829643088c32e71f, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/e47bb9e303e14a629af700a0108460dd, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/8a5cb15b23ec4a1b8c61249554903891, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/87cc6a8c51c1482ab71f3af3498bd932, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/ad827c3aa6d84c0f8240f93c7df4410e, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/f507ceb8773f4195a0e800b966ad9b2b, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/19b51d91054946e694c4c83a3fce6141, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/f153a1cfb03d41f7bc5f510c3a85f17f] to archive 2024-11-23T13:23:57,084 DEBUG [StoreCloser-TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
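In the flush-on-close entries above, column family A is flushed through mob.DefaultMobStoreFlusher into mobdir/ while B and C go through regionserver.DefaultStoreFlusher, which suggests family A was created MOB-enabled in this run. A minimal sketch of how such a table could be declared with the HBase 2.x descriptor builders; the class name and the MOB threshold value are illustrative assumptions, only the table and family names come from the log:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTable {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            TableDescriptorBuilder table =
                TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
            // Family A: MOB enabled, so cells above the threshold are written under mobdir/
            // and flushed by DefaultMobStoreFlusher, matching the log above.
            table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                .setMobEnabled(true)
                .setMobThreshold(4) // illustrative threshold in bytes, not taken from the log
                .build());
            // Families B and C: ordinary stores flushed by DefaultStoreFlusher.
            table.setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"));
            table.setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"));
            admin.createTable(table.build());
        }
    }
}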
2024-11-23T13:23:57,084 DEBUG [StoreCloser-TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/825933bcdecd461d87664a17a0fa7d77 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/825933bcdecd461d87664a17a0fa7d77 2024-11-23T13:23:57,085 DEBUG [StoreCloser-TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/c0464ac6dba54ffc948d0a4ebb63a298 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/c0464ac6dba54ffc948d0a4ebb63a298 2024-11-23T13:23:57,086 DEBUG [StoreCloser-TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/432f977297ed462eb3e8742353a7671a to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/432f977297ed462eb3e8742353a7671a 2024-11-23T13:23:57,087 DEBUG [StoreCloser-TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/822762eca48448b2b8fa4d488bc511b8 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/822762eca48448b2b8fa4d488bc511b8 2024-11-23T13:23:57,087 DEBUG [StoreCloser-TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/3d8f2d06580a41c69b46cab7e8904695 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/3d8f2d06580a41c69b46cab7e8904695 2024-11-23T13:23:57,088 DEBUG [StoreCloser-TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/94c7a59b011d4e31829643088c32e71f to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/94c7a59b011d4e31829643088c32e71f 2024-11-23T13:23:57,089 DEBUG [StoreCloser-TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/e47bb9e303e14a629af700a0108460dd to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/e47bb9e303e14a629af700a0108460dd 2024-11-23T13:23:57,090 DEBUG [StoreCloser-TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/8a5cb15b23ec4a1b8c61249554903891 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/8a5cb15b23ec4a1b8c61249554903891 2024-11-23T13:23:57,090 DEBUG [StoreCloser-TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/87cc6a8c51c1482ab71f3af3498bd932 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/87cc6a8c51c1482ab71f3af3498bd932 2024-11-23T13:23:57,091 DEBUG [StoreCloser-TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/ad827c3aa6d84c0f8240f93c7df4410e to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/ad827c3aa6d84c0f8240f93c7df4410e 2024-11-23T13:23:57,092 DEBUG [StoreCloser-TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/f507ceb8773f4195a0e800b966ad9b2b to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/f507ceb8773f4195a0e800b966ad9b2b 2024-11-23T13:23:57,092 DEBUG [StoreCloser-TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/19b51d91054946e694c4c83a3fce6141 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/19b51d91054946e694c4c83a3fce6141 2024-11-23T13:23:57,093 DEBUG [StoreCloser-TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/f153a1cfb03d41f7bc5f510c3a85f17f to 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/f153a1cfb03d41f7bc5f510c3a85f17f 2024-11-23T13:23:57,096 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/recovered.edits/277.seqid, newMaxSeqId=277, maxSeqId=4 2024-11-23T13:23:57,096 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6. 2024-11-23T13:23:57,096 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] regionserver.HRegion(1635): Region close journal for 3fb74ed11ce3b976f22e2e146ef6eea6: 2024-11-23T13:23:57,098 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION, pid=182}] handler.UnassignRegionHandler(170): Closed 3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:57,098 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=181 updating hbase:meta row=3fb74ed11ce3b976f22e2e146ef6eea6, regionState=CLOSED 2024-11-23T13:23:57,100 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=182, resume processing ppid=181 2024-11-23T13:23:57,100 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=182, ppid=181, state=SUCCESS; CloseRegionProcedure 3fb74ed11ce3b976f22e2e146ef6eea6, server=ba2e440802a7,33173,1732368061317 in 2.7450 sec 2024-11-23T13:23:57,101 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=181, resume processing ppid=180 2024-11-23T13:23:57,101 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=181, ppid=180, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=3fb74ed11ce3b976f22e2e146ef6eea6, UNASSIGN in 2.7470 sec 2024-11-23T13:23:57,102 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=180, resume processing ppid=179 2024-11-23T13:23:57,102 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=180, ppid=179, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 2.7490 sec 2024-11-23T13:23:57,103 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732368237102"}]},"ts":"1732368237102"} 2024-11-23T13:23:57,103 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-23T13:23:57,105 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-23T13:23:57,106 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=179, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 2.7580 sec 2024-11-23T13:23:58,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-23T13:23:58,453 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 179 completed 2024-11-23T13:23:58,453 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.HMaster$5(2505): 
Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-23T13:23:58,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] procedure2.ProcedureExecutor(1098): Stored pid=183, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T13:23:58,454 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=183, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T13:23:58,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-11-23T13:23:58,455 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=183, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T13:23:58,457 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:58,458 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A, FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B, FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C, FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/recovered.edits] 2024-11-23T13:23:58,460 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/0c31419bee7346639019ef376e84f345 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/0c31419bee7346639019ef376e84f345 2024-11-23T13:23:58,461 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/13d6ae6c290f49f5ae6fea211867739b to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/13d6ae6c290f49f5ae6fea211867739b 2024-11-23T13:23:58,461 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/9e3a3450729a45b1aaa6ecd456a6950d to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/9e3a3450729a45b1aaa6ecd456a6950d 2024-11-23T13:23:58,462 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/b75ffba7ece14b7f95e773e37764c9ee to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/A/b75ffba7ece14b7f95e773e37764c9ee 2024-11-23T13:23:58,464 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/4e8fe24eb56142fd98fa109d67ce0f9f to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/4e8fe24eb56142fd98fa109d67ce0f9f 2024-11-23T13:23:58,464 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/86c00d6117044db7b18a66fe0740dbe0 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/86c00d6117044db7b18a66fe0740dbe0 2024-11-23T13:23:58,465 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/b4d1a2bcd6b94cf4a7538c9d321abab7 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/b4d1a2bcd6b94cf4a7538c9d321abab7 2024-11-23T13:23:58,466 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/e40de72cd0db449ba1d09caddf8d7c7e to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/B/e40de72cd0db449ba1d09caddf8d7c7e 2024-11-23T13:23:58,467 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/21226794ef6544b78e5e1bc6bdd09283 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/21226794ef6544b78e5e1bc6bdd09283 2024-11-23T13:23:58,468 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/24db1b15953a4834844ced3532354398 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/24db1b15953a4834844ced3532354398 2024-11-23T13:23:58,469 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/8f75672142a340d49dffdd68fcc075ae to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/8f75672142a340d49dffdd68fcc075ae 2024-11-23T13:23:58,469 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/e8feb58dda2640bcb4a84600c2f991ed to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/C/e8feb58dda2640bcb4a84600c2f991ed 2024-11-23T13:23:58,471 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/recovered.edits/277.seqid to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6/recovered.edits/277.seqid 2024-11-23T13:23:58,471 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/default/TestAcidGuarantees/3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:58,471 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-23T13:23:58,472 DEBUG [PEWorker-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-23T13:23:58,472 DEBUG [PEWorker-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-23T13:23:58,474 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112326badd594aa84ef6a7f278dac81c86d8_3fb74ed11ce3b976f22e2e146ef6eea6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112326badd594aa84ef6a7f278dac81c86d8_3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:58,475 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411236c1f0d7ed24c49279715a7a77cdaf78d_3fb74ed11ce3b976f22e2e146ef6eea6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411236c1f0d7ed24c49279715a7a77cdaf78d_3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:58,476 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411236d5b3d8247784e7d9fcd20432249ba6f_3fb74ed11ce3b976f22e2e146ef6eea6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411236d5b3d8247784e7d9fcd20432249ba6f_3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:58,477 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112371d5f47e178444a2a45ad7aee55b4755_3fb74ed11ce3b976f22e2e146ef6eea6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112371d5f47e178444a2a45ad7aee55b4755_3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:58,478 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411237442b66dadca452fb0c2bcb88fbde753_3fb74ed11ce3b976f22e2e146ef6eea6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411237442b66dadca452fb0c2bcb88fbde753_3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:58,479 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112394cc4b91edbb442a97269a76c8b6c5a2_3fb74ed11ce3b976f22e2e146ef6eea6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112394cc4b91edbb442a97269a76c8b6c5a2_3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:58,480 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411239665e3fd3d884a65a447de1868366876_3fb74ed11ce3b976f22e2e146ef6eea6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411239665e3fd3d884a65a447de1868366876_3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:58,480 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123998a685ac39045a79b00a69b633b31d0_3fb74ed11ce3b976f22e2e146ef6eea6 to 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123998a685ac39045a79b00a69b633b31d0_3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:58,481 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411239fce40223a4c43ceb7b8d5460eb97f5d_3fb74ed11ce3b976f22e2e146ef6eea6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411239fce40223a4c43ceb7b8d5460eb97f5d_3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:58,482 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123a617eaf743f24252a548772f82694f55_3fb74ed11ce3b976f22e2e146ef6eea6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123a617eaf743f24252a548772f82694f55_3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:58,483 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123bae928c3d5504437af072bfc5826dd62_3fb74ed11ce3b976f22e2e146ef6eea6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123bae928c3d5504437af072bfc5826dd62_3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:58,484 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123c5f14dea7c314acb93f088c35a7fc448_3fb74ed11ce3b976f22e2e146ef6eea6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123c5f14dea7c314acb93f088c35a7fc448_3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:58,485 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123f095e7e963c44f70923bff6e3e464ec0_3fb74ed11ce3b976f22e2e146ef6eea6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123f095e7e963c44f70923bff6e3e464ec0_3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:58,486 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123f325f95aed4e437fa7adad17a69850e0_3fb74ed11ce3b976f22e2e146ef6eea6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123f325f95aed4e437fa7adad17a69850e0_3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:58,486 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123fc34a2ee4d7042d7847a7f01c715217a_3fb74ed11ce3b976f22e2e146ef6eea6 to hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241123fc34a2ee4d7042d7847a7f01c715217a_3fb74ed11ce3b976f22e2e146ef6eea6 2024-11-23T13:23:58,487 DEBUG [PEWorker-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-23T13:23:58,488 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=183, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T13:23:58,490 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-23T13:23:58,492 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-23T13:23:58,492 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=183, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T13:23:58,493 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-23T13:23:58,493 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732368238493"}]},"ts":"9223372036854775807"} 2024-11-23T13:23:58,494 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-23T13:23:58,494 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 3fb74ed11ce3b976f22e2e146ef6eea6, NAME => 'TestAcidGuarantees,,1732368210222.3fb74ed11ce3b976f22e2e146ef6eea6.', STARTKEY => '', ENDKEY => ''}] 2024-11-23T13:23:58,494 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
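
Editor's note: the DeleteTableProcedure steps above (pid=183: archive regions, remove from META, mark deleted) are what the master runs after a client asks for the table to be deleted, as the "Client=jenkins ... delete TestAcidGuarantees" audit line shows. A minimal client-side sketch of issuing that request with the HBase 2.x Admin API; this is only an illustration of the trigger, not the test's own code, and the connection configuration is assumed to come from hbase-site.xml on the classpath:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DeleteTableExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();   // reads hbase-site.xml from the classpath
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                TableName table = TableName.valueOf("TestAcidGuarantees");
                if (admin.tableExists(table)) {
                    if (admin.isTableEnabled(table)) {
                        admin.disableTable(table);   // a table must be disabled before delete
                    }
                    admin.deleteTable(table);        // master runs DeleteTableProcedure: archive regions, clean META
                }
            }
        }
    }

The call returns once the master reports the procedure finished, which is the "Operation: DELETE ... procId: 183 completed" line further down.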
2024-11-23T13:23:58,494 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732368238494"}]},"ts":"9223372036854775807"} 2024-11-23T13:23:58,495 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-23T13:23:58,497 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=183, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-23T13:23:58,498 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=183, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 44 msec 2024-11-23T13:23:58,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46617 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-11-23T13:23:58,556 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 183 completed 2024-11-23T13:23:58,565 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMobGetAtomicity Thread=240 (was 237) - Thread LEAK? -, OpenFileDescriptor=455 (was 451) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=282 (was 277) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=3637 (was 3644) 2024-11-23T13:23:58,565 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-11-23T13:23:58,565 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-23T13:23:58,565 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7e541e88 to 127.0.0.1:51875 2024-11-23T13:23:58,565 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:23:58,565 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-23T13:23:58,565 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=690286051, stopped=false 2024-11-23T13:23:58,565 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=ba2e440802a7,46617,1732368060590 2024-11-23T13:23:58,567 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46617-0x100248a866f0000, quorum=127.0.0.1:51875, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-23T13:23:58,567 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33173-0x100248a866f0001, quorum=127.0.0.1:51875, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-23T13:23:58,567 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-11-23T13:23:58,567 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46617-0x100248a866f0000, quorum=127.0.0.1:51875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T13:23:58,567 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33173-0x100248a866f0001, quorum=127.0.0.1:51875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T13:23:58,568 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): 
regionserver:33173-0x100248a866f0001, quorum=127.0.0.1:51875, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T13:23:58,568 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46617-0x100248a866f0000, quorum=127.0.0.1:51875, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T13:23:58,568 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:23:58,568 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'ba2e440802a7,33173,1732368061317' ***** 2024-11-23T13:23:58,568 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-11-23T13:23:58,569 INFO [RS:0;ba2e440802a7:33173 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-23T13:23:58,569 INFO [RS:0;ba2e440802a7:33173 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-23T13:23:58,569 INFO [RS:0;ba2e440802a7:33173 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-23T13:23:58,569 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-11-23T13:23:58,569 INFO [RS:0;ba2e440802a7:33173 {}] regionserver.HRegionServer(3579): Received CLOSE for ee2ee0e805ec7a6fa6f5f67efb41c78f 2024-11-23T13:23:58,569 INFO [RS:0;ba2e440802a7:33173 {}] regionserver.HRegionServer(1224): stopping server ba2e440802a7,33173,1732368061317 2024-11-23T13:23:58,569 DEBUG [RS:0;ba2e440802a7:33173 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:23:58,570 INFO [RS:0;ba2e440802a7:33173 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-23T13:23:58,570 INFO [RS:0;ba2e440802a7:33173 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-23T13:23:58,570 INFO [RS:0;ba2e440802a7:33173 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-23T13:23:58,570 INFO [RS:0;ba2e440802a7:33173 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-11-23T13:23:58,570 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing ee2ee0e805ec7a6fa6f5f67efb41c78f, disabling compactions & flushes 2024-11-23T13:23:58,570 INFO [RS:0;ba2e440802a7:33173 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-11-23T13:23:58,570 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1732368064871.ee2ee0e805ec7a6fa6f5f67efb41c78f. 2024-11-23T13:23:58,570 DEBUG [RS:0;ba2e440802a7:33173 {}] regionserver.HRegionServer(1603): Online Regions={ee2ee0e805ec7a6fa6f5f67efb41c78f=hbase:namespace,,1732368064871.ee2ee0e805ec7a6fa6f5f67efb41c78f., 1588230740=hbase:meta,,1.1588230740} 2024-11-23T13:23:58,570 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1732368064871.ee2ee0e805ec7a6fa6f5f67efb41c78f. 2024-11-23T13:23:58,570 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1732368064871.ee2ee0e805ec7a6fa6f5f67efb41c78f. 
after waiting 0 ms 2024-11-23T13:23:58,570 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1732368064871.ee2ee0e805ec7a6fa6f5f67efb41c78f. 2024-11-23T13:23:58,570 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing ee2ee0e805ec7a6fa6f5f67efb41c78f 1/1 column families, dataSize=78 B heapSize=488 B 2024-11-23T13:23:58,570 DEBUG [RS_CLOSE_META-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-11-23T13:23:58,570 INFO [RS_CLOSE_META-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-11-23T13:23:58,570 DEBUG [RS_CLOSE_META-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-11-23T13:23:58,570 DEBUG [RS_CLOSE_META-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-23T13:23:58,570 DEBUG [RS_CLOSE_META-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-11-23T13:23:58,570 INFO [RS_CLOSE_META-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=20.55 KB heapSize=35.87 KB 2024-11-23T13:23:58,570 DEBUG [RS:0;ba2e440802a7:33173 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, ee2ee0e805ec7a6fa6f5f67efb41c78f 2024-11-23T13:23:58,571 INFO [regionserver/ba2e440802a7:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T13:23:58,586 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/hbase/namespace/ee2ee0e805ec7a6fa6f5f67efb41c78f/.tmp/info/7654a80d93684baca2443ce0072c4e05 is 45, key is default/info:d/1732368065822/Put/seqid=0 2024-11-23T13:23:58,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742454_1630 (size=5037) 2024-11-23T13:23:58,590 DEBUG [RS_CLOSE_META-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/hbase/meta/1588230740/.tmp/info/e85b3019b2444ab1ae04d64edb1e11b6 is 143, key is hbase:namespace,,1732368064871.ee2ee0e805ec7a6fa6f5f67efb41c78f./info:regioninfo/1732368065702/Put/seqid=0 2024-11-23T13:23:58,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742455_1631 (size=7725) 2024-11-23T13:23:58,771 DEBUG [RS:0;ba2e440802a7:33173 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, ee2ee0e805ec7a6fa6f5f67efb41c78f 2024-11-23T13:23:58,971 DEBUG [RS:0;ba2e440802a7:33173 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, ee2ee0e805ec7a6fa6f5f67efb41c78f 2024-11-23T13:23:58,990 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data 
size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/hbase/namespace/ee2ee0e805ec7a6fa6f5f67efb41c78f/.tmp/info/7654a80d93684baca2443ce0072c4e05 2024-11-23T13:23:58,993 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/hbase/namespace/ee2ee0e805ec7a6fa6f5f67efb41c78f/.tmp/info/7654a80d93684baca2443ce0072c4e05 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/hbase/namespace/ee2ee0e805ec7a6fa6f5f67efb41c78f/info/7654a80d93684baca2443ce0072c4e05 2024-11-23T13:23:58,993 INFO [RS_CLOSE_META-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/hbase/meta/1588230740/.tmp/info/e85b3019b2444ab1ae04d64edb1e11b6 2024-11-23T13:23:58,996 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/hbase/namespace/ee2ee0e805ec7a6fa6f5f67efb41c78f/info/7654a80d93684baca2443ce0072c4e05, entries=2, sequenceid=6, filesize=4.9 K 2024-11-23T13:23:58,996 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for ee2ee0e805ec7a6fa6f5f67efb41c78f in 426ms, sequenceid=6, compaction requested=false 2024-11-23T13:23:58,999 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/hbase/namespace/ee2ee0e805ec7a6fa6f5f67efb41c78f/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T13:23:59,000 INFO [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1732368064871.ee2ee0e805ec7a6fa6f5f67efb41c78f. 2024-11-23T13:23:59,000 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for ee2ee0e805ec7a6fa6f5f67efb41c78f: 2024-11-23T13:23:59,000 DEBUG [RS_CLOSE_REGION-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1732368064871.ee2ee0e805ec7a6fa6f5f67efb41c78f. 
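
Editor's note: the "Flushed memstore data ... to=.../.tmp/..." followed by "Committing .../.tmp/<file> as .../<family>/<file>" lines show the flush writing a new HFile into the region's .tmp directory and then moving it into the store directory. HBase does this through HRegionFileSystem internally; the following is only a stand-alone sketch of the underlying write-to-temp-then-rename commit idea using the public Hadoop FileSystem API, with placeholder paths:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class TmpCommitSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            FileSystem fs = FileSystem.get(conf);
            // Placeholder paths; the real layout is <table>/<region>/.tmp and <table>/<region>/<family>.
            Path tmp = new Path("/hbase/data/hbase/namespace/region-x/.tmp/hfile-123");
            Path dst = new Path("/hbase/data/hbase/namespace/region-x/info/hfile-123");
            fs.mkdirs(dst.getParent());
            // rename is atomic within a single HDFS namespace, so readers either see the
            // fully written file in the store directory or do not see it at all.
            if (!fs.rename(tmp, dst)) {
                throw new java.io.IOException("commit failed: " + tmp + " -> " + dst);
            }
        }
    }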
2024-11-23T13:23:59,012 DEBUG [RS_CLOSE_META-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/hbase/meta/1588230740/.tmp/rep_barrier/a7129459460c46bcb8d4ac1fb63690d8 is 102, key is TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f./rep_barrier:/1732368089920/DeleteFamily/seqid=0 2024-11-23T13:23:59,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742456_1632 (size=6025) 2024-11-23T13:23:59,171 DEBUG [RS:0;ba2e440802a7:33173 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-23T13:23:59,371 DEBUG [RS:0;ba2e440802a7:33173 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-23T13:23:59,416 INFO [RS_CLOSE_META-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=588 B at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/hbase/meta/1588230740/.tmp/rep_barrier/a7129459460c46bcb8d4ac1fb63690d8 2024-11-23T13:23:59,433 DEBUG [RS_CLOSE_META-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/hbase/meta/1588230740/.tmp/table/0db24f5fb6e246098e92dd0548bfa5d7 is 96, key is TestAcidGuarantees,,1732368066066.519df349e6147d27e7c8246089c4409f./table:/1732368089920/DeleteFamily/seqid=0 2024-11-23T13:23:59,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742457_1633 (size=5942) 2024-11-23T13:23:59,572 INFO [RS:0;ba2e440802a7:33173 {}] regionserver.HRegionServer(1599): Waiting on 1 regions to close 2024-11-23T13:23:59,572 DEBUG [RS:0;ba2e440802a7:33173 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-23T13:23:59,572 DEBUG [RS:0;ba2e440802a7:33173 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-23T13:23:59,580 INFO [regionserver/ba2e440802a7:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-23T13:23:59,580 INFO [regionserver/ba2e440802a7:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-23T13:23:59,720 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
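
Editor's note: the "NoSuchFieldException: threadGroup ... See HBASE-27595" DEBUG line above comes from a reflective probe for a private Hadoop field that newer Hadoop releases no longer declare; the fixer logs the miss and skips its workaround. A JDK-only sketch of that probe-and-tolerate pattern (the field name is taken from the log; the target class and handling here are illustrative):

    public class ThreadGroupFieldProbe {
        static java.lang.reflect.Field probe(Class<?> clazz) {
            try {
                return clazz.getDeclaredField("threadGroup");
            } catch (NoSuchFieldException e) {
                // Field absent in this version; log and continue without the workaround.
                return null;
            }
        }

        public static void main(String[] args) {
            // Prints whether the probed class happens to declare such a field.
            System.out.println("field found: " + (probe(Thread.class) != null));
        }
    }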
2024-11-23T13:23:59,772 DEBUG [RS:0;ba2e440802a7:33173 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-23T13:23:59,837 INFO [RS_CLOSE_META-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.08 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/hbase/meta/1588230740/.tmp/table/0db24f5fb6e246098e92dd0548bfa5d7 2024-11-23T13:23:59,840 DEBUG [RS_CLOSE_META-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/hbase/meta/1588230740/.tmp/info/e85b3019b2444ab1ae04d64edb1e11b6 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/hbase/meta/1588230740/info/e85b3019b2444ab1ae04d64edb1e11b6 2024-11-23T13:23:59,843 INFO [RS_CLOSE_META-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/hbase/meta/1588230740/info/e85b3019b2444ab1ae04d64edb1e11b6, entries=22, sequenceid=93, filesize=7.5 K 2024-11-23T13:23:59,844 DEBUG [RS_CLOSE_META-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/hbase/meta/1588230740/.tmp/rep_barrier/a7129459460c46bcb8d4ac1fb63690d8 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/hbase/meta/1588230740/rep_barrier/a7129459460c46bcb8d4ac1fb63690d8 2024-11-23T13:23:59,846 INFO [RS_CLOSE_META-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/hbase/meta/1588230740/rep_barrier/a7129459460c46bcb8d4ac1fb63690d8, entries=6, sequenceid=93, filesize=5.9 K 2024-11-23T13:23:59,847 DEBUG [RS_CLOSE_META-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/hbase/meta/1588230740/.tmp/table/0db24f5fb6e246098e92dd0548bfa5d7 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/hbase/meta/1588230740/table/0db24f5fb6e246098e92dd0548bfa5d7 2024-11-23T13:23:59,849 INFO [RS_CLOSE_META-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/hbase/meta/1588230740/table/0db24f5fb6e246098e92dd0548bfa5d7, entries=9, sequenceid=93, filesize=5.8 K 2024-11-23T13:23:59,849 INFO [RS_CLOSE_META-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~20.55 KB/21040, heapSize ~35.82 KB/36680, currentSize=0 B/0 for 1588230740 in 1279ms, sequenceid=93, compaction requested=false 2024-11-23T13:23:59,853 DEBUG [RS_CLOSE_META-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/data/hbase/meta/1588230740/recovered.edits/96.seqid, newMaxSeqId=96, maxSeqId=1 2024-11-23T13:23:59,853 DEBUG 
[RS_CLOSE_META-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-23T13:23:59,853 INFO [RS_CLOSE_META-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-11-23T13:23:59,853 DEBUG [RS_CLOSE_META-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-11-23T13:23:59,853 DEBUG [RS_CLOSE_META-regionserver/ba2e440802a7:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-23T13:23:59,972 INFO [RS:0;ba2e440802a7:33173 {}] regionserver.HRegionServer(1250): stopping server ba2e440802a7,33173,1732368061317; all regions closed. 2024-11-23T13:23:59,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741834_1010 (size=26050) 2024-11-23T13:23:59,978 DEBUG [RS:0;ba2e440802a7:33173 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/oldWALs 2024-11-23T13:23:59,978 INFO [RS:0;ba2e440802a7:33173 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL ba2e440802a7%2C33173%2C1732368061317.meta:.meta(num 1732368064627) 2024-11-23T13:23:59,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741832_1008 (size=14828893) 2024-11-23T13:23:59,981 DEBUG [RS:0;ba2e440802a7:33173 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/oldWALs 2024-11-23T13:23:59,981 INFO [RS:0;ba2e440802a7:33173 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL ba2e440802a7%2C33173%2C1732368061317:(num 1732368063674) 2024-11-23T13:23:59,981 DEBUG [RS:0;ba2e440802a7:33173 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:23:59,981 INFO [RS:0;ba2e440802a7:33173 {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T13:23:59,981 INFO [RS:0;ba2e440802a7:33173 {}] hbase.ChoreService(370): Chore service for: regionserver/ba2e440802a7:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown 2024-11-23T13:23:59,981 INFO [regionserver/ba2e440802a7:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-11-23T13:23:59,982 INFO [RS:0;ba2e440802a7:33173 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:33173 2024-11-23T13:23:59,985 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33173-0x100248a866f0001, quorum=127.0.0.1:51875, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/ba2e440802a7,33173,1732368061317 2024-11-23T13:23:59,985 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46617-0x100248a866f0000, quorum=127.0.0.1:51875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-23T13:23:59,986 ERROR [Time-limited test-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher. 
java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$359/0x00007f42608f1d48@5c3f393c rejected from java.util.concurrent.ThreadPoolExecutor@193dbcdf[Shutting down, pool size = 1, active threads = 0, queued tasks = 0, completed tasks = 15] at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1360) ~[?:?] at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?] at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4] at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4] 2024-11-23T13:23:59,986 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [ba2e440802a7,33173,1732368061317] 2024-11-23T13:23:59,986 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing ba2e440802a7,33173,1732368061317; numProcessing=1 2024-11-23T13:23:59,988 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/ba2e440802a7,33173,1732368061317 already deleted, retry=false 2024-11-23T13:23:59,988 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; ba2e440802a7,33173,1732368061317 expired; onlineServers=0 2024-11-23T13:23:59,988 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'ba2e440802a7,46617,1732368060590' ***** 2024-11-23T13:23:59,988 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-23T13:23:59,989 DEBUG [M:0;ba2e440802a7:46617 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4f4d7207, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ba2e440802a7/172.17.0.2:0 2024-11-23T13:23:59,989 INFO [M:0;ba2e440802a7:46617 {}] regionserver.HRegionServer(1224): stopping server ba2e440802a7,46617,1732368060590 2024-11-23T13:23:59,989 INFO [M:0;ba2e440802a7:46617 {}] regionserver.HRegionServer(1250): stopping server ba2e440802a7,46617,1732368060590; all regions closed. 2024-11-23T13:23:59,989 DEBUG [M:0;ba2e440802a7:46617 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T13:23:59,989 DEBUG [M:0;ba2e440802a7:46617 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-23T13:23:59,989 DEBUG [M:0;ba2e440802a7:46617 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-23T13:23:59,989 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
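
Editor's note: the RejectedExecutionException above is the usual symptom of a ZooKeeper event arriving after the watcher's executor has been shut down: with the default AbortPolicy, a ThreadPoolExecutor rejects any task submitted once shutdown() has been called, which is harmless during teardown. A minimal JDK-only reproduction of that behavior (not HBase code):

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.RejectedExecutionException;

    public class RejectAfterShutdown {
        public static void main(String[] args) {
            ExecutorService pool = Executors.newSingleThreadExecutor();
            pool.shutdown();                        // pool enters the "Shutting down" state seen in the log
            try {
                pool.execute(() -> {});             // late submission, like a ZK event after stop
            } catch (RejectedExecutionException e) {
                System.out.println("rejected as expected: " + e);
            }
        }
    }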
2024-11-23T13:23:59,989 DEBUG [master/ba2e440802a7:0:becomeActiveMaster-HFileCleaner.large.0-1732368063389 {}] cleaner.HFileCleaner(306): Exit Thread[master/ba2e440802a7:0:becomeActiveMaster-HFileCleaner.large.0-1732368063389,5,FailOnTimeoutGroup] 2024-11-23T13:23:59,989 DEBUG [master/ba2e440802a7:0:becomeActiveMaster-HFileCleaner.small.0-1732368063390 {}] cleaner.HFileCleaner(306): Exit Thread[master/ba2e440802a7:0:becomeActiveMaster-HFileCleaner.small.0-1732368063390,5,FailOnTimeoutGroup] 2024-11-23T13:23:59,989 INFO [M:0;ba2e440802a7:46617 {}] hbase.ChoreService(370): Chore service for: master/ba2e440802a7:0 had [] on shutdown 2024-11-23T13:23:59,989 DEBUG [M:0;ba2e440802a7:46617 {}] master.HMaster(1733): Stopping service threads 2024-11-23T13:23:59,989 INFO [M:0;ba2e440802a7:46617 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-23T13:23:59,989 ERROR [M:0;ba2e440802a7:46617 {}] procedure2.ProcedureExecutor(722): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10] Thread[HFileArchiver-5,5,PEWorkerGroup] Thread[IPC Client (59733779) connection to localhost/127.0.0.1:34115 from jenkins,5,PEWorkerGroup] Thread[IPC Parameter Sending Thread for localhost/127.0.0.1:34115,5,PEWorkerGroup] Thread[HFileArchiver-6,5,PEWorkerGroup] 2024-11-23T13:23:59,990 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46617-0x100248a866f0000, quorum=127.0.0.1:51875, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-23T13:23:59,990 INFO [M:0;ba2e440802a7:46617 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-23T13:23:59,990 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46617-0x100248a866f0000, quorum=127.0.0.1:51875, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T13:23:59,990 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-23T13:23:59,990 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46617-0x100248a866f0000, quorum=127.0.0.1:51875, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-23T13:23:59,990 DEBUG [M:0;ba2e440802a7:46617 {}] zookeeper.ZKUtil(347): master:46617-0x100248a866f0000, quorum=127.0.0.1:51875, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-23T13:23:59,990 WARN [M:0;ba2e440802a7:46617 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-23T13:23:59,990 INFO [M:0;ba2e440802a7:46617 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-11-23T13:23:59,991 INFO [M:0;ba2e440802a7:46617 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-23T13:23:59,991 DEBUG [M:0;ba2e440802a7:46617 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-23T13:23:59,991 INFO [M:0;ba2e440802a7:46617 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-23T13:23:59,991 DEBUG [M:0;ba2e440802a7:46617 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T13:23:59,991 DEBUG [M:0;ba2e440802a7:46617 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-23T13:23:59,991 DEBUG [M:0;ba2e440802a7:46617 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T13:23:59,991 INFO [M:0;ba2e440802a7:46617 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=762.45 KB heapSize=936.63 KB 2024-11-23T13:24:00,006 DEBUG [M:0;ba2e440802a7:46617 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a62014e505734bbd8cb6485e93b1ad11 is 82, key is hbase:meta,,1/info:regioninfo/1732368064765/Put/seqid=0 2024-11-23T13:24:00,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742458_1634 (size=5672) 2024-11-23T13:24:00,088 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33173-0x100248a866f0001, quorum=127.0.0.1:51875, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T13:24:00,088 INFO [RS:0;ba2e440802a7:33173 {}] regionserver.HRegionServer(1307): Exiting; stopping=ba2e440802a7,33173,1732368061317; zookeeper connection closed. 2024-11-23T13:24:00,088 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33173-0x100248a866f0001, quorum=127.0.0.1:51875, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T13:24:00,088 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@46a40b11 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@46a40b11 2024-11-23T13:24:00,088 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-23T13:24:00,410 INFO [M:0;ba2e440802a7:46617 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=2136 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a62014e505734bbd8cb6485e93b1ad11 2024-11-23T13:24:00,430 DEBUG [M:0;ba2e440802a7:46617 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1f7f586328d94ba28832e9b6aa4c5d07 is 2279, key is \x00\x00\x00\x00\x00\x00\x00\x9A/proc:d/1732368213238/Put/seqid=0 2024-11-23T13:24:00,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742459_1635 (size=45794) 2024-11-23T13:24:00,834 INFO [M:0;ba2e440802a7:46617 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=761.89 KB at sequenceid=2136 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1f7f586328d94ba28832e9b6aa4c5d07 2024-11-23T13:24:00,837 INFO 
[M:0;ba2e440802a7:46617 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 1f7f586328d94ba28832e9b6aa4c5d07 2024-11-23T13:24:00,853 DEBUG [M:0;ba2e440802a7:46617 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/42130665011747889692535d79821b5b is 69, key is ba2e440802a7,33173,1732368061317/rs:state/1732368063443/Put/seqid=0 2024-11-23T13:24:00,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073742460_1636 (size=5156) 2024-11-23T13:24:01,075 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-23T13:24:01,075 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-23T13:24:01,075 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-11-23T13:24:01,076 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-23T13:24:01,257 INFO [M:0;ba2e440802a7:46617 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=2136 (bloomFilter=true), to=hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/42130665011747889692535d79821b5b 2024-11-23T13:24:01,260 DEBUG [M:0;ba2e440802a7:46617 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a62014e505734bbd8cb6485e93b1ad11 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a62014e505734bbd8cb6485e93b1ad11 2024-11-23T13:24:01,262 INFO [M:0;ba2e440802a7:46617 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a62014e505734bbd8cb6485e93b1ad11, entries=8, sequenceid=2136, filesize=5.5 K 2024-11-23T13:24:01,263 DEBUG [M:0;ba2e440802a7:46617 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1f7f586328d94ba28832e9b6aa4c5d07 as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/1f7f586328d94ba28832e9b6aa4c5d07 2024-11-23T13:24:01,265 INFO [M:0;ba2e440802a7:46617 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 1f7f586328d94ba28832e9b6aa4c5d07 2024-11-23T13:24:01,265 INFO [M:0;ba2e440802a7:46617 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/1f7f586328d94ba28832e9b6aa4c5d07, entries=183, sequenceid=2136, filesize=44.7 K 2024-11-23T13:24:01,266 DEBUG [M:0;ba2e440802a7:46617 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/42130665011747889692535d79821b5b as hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/42130665011747889692535d79821b5b 2024-11-23T13:24:01,268 INFO [M:0;ba2e440802a7:46617 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34115/user/jenkins/test-data/4a47f29c-bf1f-9f22-ba14-958d858081f7/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/42130665011747889692535d79821b5b, entries=1, sequenceid=2136, filesize=5.0 K 2024-11-23T13:24:01,269 INFO [M:0;ba2e440802a7:46617 {}] regionserver.HRegion(3040): Finished flush of dataSize ~762.45 KB/780747, heapSize ~936.33 KB/958800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1278ms, sequenceid=2136, compaction requested=false 2024-11-23T13:24:01,270 INFO [M:0;ba2e440802a7:46617 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T13:24:01,270 DEBUG [M:0;ba2e440802a7:46617 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-23T13:24:01,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44873 is added to blk_1073741830_1006 (size=920842) 2024-11-23T13:24:01,273 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-11-23T13:24:01,273 INFO [M:0;ba2e440802a7:46617 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 2024-11-23T13:24:01,273 INFO [M:0;ba2e440802a7:46617 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:46617 2024-11-23T13:24:01,275 DEBUG [M:0;ba2e440802a7:46617 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/ba2e440802a7,46617,1732368060590 already deleted, retry=false 2024-11-23T13:24:01,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46617-0x100248a866f0000, quorum=127.0.0.1:51875, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T13:24:01,377 INFO [M:0;ba2e440802a7:46617 {}] regionserver.HRegionServer(1307): Exiting; stopping=ba2e440802a7,46617,1732368060590; zookeeper connection closed. 
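
Editor's note: the repeated "ZKWatcher ... Received ZooKeeper Event, type=NodeDeleted ..." lines are ordinary ZooKeeper watch notifications; deleting /hbase/running is how shutdown is signalled to the servers. A minimal stand-alone sketch of registering such a watch with the plain ZooKeeper client API; the quorum address and wait are illustrative (the test's quorum was 127.0.0.1:51875):

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class RunningZNodeWatch {
        public static void main(String[] args) throws Exception {
            ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30000, (WatchedEvent e) -> {
                if (e.getType() == Watcher.Event.EventType.NodeDeleted
                        && "/hbase/running".equals(e.getPath())) {
                    System.out.println("cluster marked down: " + e.getPath());
                }
            });
            zk.exists("/hbase/running", true);   // sets a one-shot watch; deletion fires NodeDeleted
            Thread.sleep(60_000);                // keep the session alive long enough to observe it
            zk.close();
        }
    }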
2024-11-23T13:24:01,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46617-0x100248a866f0000, quorum=127.0.0.1:51875, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T13:24:01,382 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1f79ec76{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T13:24:01,384 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@576ebda6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T13:24:01,384 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T13:24:01,384 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4727fac8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T13:24:01,384 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47db50b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/342937a9-dda7-d088-0c8f-015e3d805ddd/hadoop.log.dir/,STOPPED} 2024-11-23T13:24:01,387 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-23T13:24:01,387 WARN [BP-413237464-172.17.0.2-1732368057776 heartbeating to localhost/127.0.0.1:34115 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-23T13:24:01,387 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-23T13:24:01,387 WARN [BP-413237464-172.17.0.2-1732368057776 heartbeating to localhost/127.0.0.1:34115 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-413237464-172.17.0.2-1732368057776 (Datanode Uuid 678b9c6e-6dcc-4ccd-bcce-5c58caaec29d) service to localhost/127.0.0.1:34115 2024-11-23T13:24:01,389 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/342937a9-dda7-d088-0c8f-015e3d805ddd/cluster_c9da1ea8-f465-6e7b-696b-5e558e5f7ca2/dfs/data/data1/current/BP-413237464-172.17.0.2-1732368057776 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T13:24:01,389 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/342937a9-dda7-d088-0c8f-015e3d805ddd/cluster_c9da1ea8-f465-6e7b-696b-5e558e5f7ca2/dfs/data/data2/current/BP-413237464-172.17.0.2-1732368057776 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-23T13:24:01,390 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-23T13:24:01,397 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@b03fcff{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-23T13:24:01,398 INFO 
[Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T13:24:01,398 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T13:24:01,398 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-23T13:24:01,398 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/342937a9-dda7-d088-0c8f-015e3d805ddd/hadoop.log.dir/,STOPPED} 2024-11-23T13:24:01,414 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-11-23T13:24:01,536 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down
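
Editor's note: the final lines are HBaseTestingUtility tearing down the in-process cluster (region server, master, datanode, namenode, ZooKeeper), ending with "Minicluster is down". A minimal sketch of the usual lifecycle around that utility, assuming the hbase-testing-util dependency; the table and family names echo the log, the body is illustrative:

    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Table;

    public class MiniClusterLifecycle {
        public static void main(String[] args) throws Exception {
            HBaseTestingUtility util = new HBaseTestingUtility();
            util.startMiniCluster();                  // starts ZK, HDFS and HBase in-process
            try {
                Table t = util.createTable(TableName.valueOf("TestAcidGuarantees"), "A");
                // ... exercise the table here ...
                util.deleteTable(TableName.valueOf("TestAcidGuarantees"));
            } finally {
                util.shutdownMiniCluster();           // produces the "Minicluster is down" line
            }
        }
    }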